diff --git a/server/pyproject.toml b/server/pyproject.toml
index 8f707763c19..91eca66abef 100644
--- a/server/pyproject.toml
+++ b/server/pyproject.toml
@@ -190,13 +190,13 @@ ignore_missing_imports = true
 
 # Ignore any python files not in the parsec module
-[[tool.mypy.overrides]]
-module = "tests.*"
-ignore_errors = true
+# [[tool.mypy.overrides]]
+# module = "tests.*"
+# ignore_errors = true
 
-[[tool.mypy.overrides]]
-module = "tests.common.freeze_time"
-ignore_errors = false
+# [[tool.mypy.overrides]]
+# module = "tests.common.freeze_time"
+# ignore_errors = false
 
 [[tool.mypy.overrides]]
 module = "misc.*"
diff --git a/server/setup.cfg b/server/setup.cfg
index 8edb63864b4..ca0f3593af8 100644
--- a/server/setup.cfg
+++ b/server/setup.cfg
@@ -21,11 +21,10 @@ exclude_lines =
 
 [tool:pytest]
+# Remove the need for explicit `@pytest.mark.asyncio` decorators
+asyncio_mode = auto
 # Filter warnings
 filterwarnings =
-    # Ignore trio deprecation warnings outside of parsec
-    ignore::trio.TrioDeprecationWarning:
-    default::trio.TrioDeprecationWarning:tests\.(.*)|parsec\.(.*)
     # Ignore general deprecation warnings outside of parsec
     ignore::DeprecationWarning
     default::DeprecationWarning:tests\.(.*)|parsec\.(.*)
diff --git a/server/tests/api/__init__.py b/server/tests/api/__init__.py
deleted file mode 100644
index 05e02a3b569..00000000000
--- a/server/tests/api/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS
diff --git a/server/tests/api/test_data_certif.py b/server/tests/api/test_data_certif.py
deleted file mode 100644
index ee2a664b34d..00000000000
--- a/server/tests/api/test_data_certif.py
+++ /dev/null
@@ -1,196 +0,0 @@
-# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS
-from __future__ import annotations
-
-import zlib
-
-import pytest
-
-from parsec._parsec import DateTime
-from parsec.api.data import DataError, DeviceCertificate, RevokedUserCertificate, UserCertificate
-from parsec.api.protocol import UserProfile
-from parsec.serde import packb, unpackb
-
-
-def test_unsecure_read_device_certificate_bad_data():
-    with pytest.raises(DataError):
-        DeviceCertificate.unsecure_load(b"dummy")
-
-
-def test_unsecure_read_revoked_user_certificate_bad_data():
-    with pytest.raises(DataError):
-        RevokedUserCertificate.unsecure_load(b"dummy")
-
-
-def test_unsecure_read_user_certificate_bad_data():
-    with pytest.raises(DataError):
-        UserCertificate.unsecure_load(b"dummy")
-
-
-def test_build_user_certificate(alice, bob, mallory):
-    now = DateTime.now()
-    certif = UserCertificate(
-        author=alice.device_id,
-        timestamp=now,
-        user_id=bob.user_id,
-        human_handle=bob.human_handle,
-        public_key=bob.public_key,
-        profile=UserProfile.ADMIN,
-    ).dump_and_sign(alice.signing_key)
-    assert isinstance(certif, bytes)
-
-    unsecure = UserCertificate.unsecure_load(certif)
-    assert isinstance(unsecure, UserCertificate)
-    assert unsecure.user_id == bob.user_id
-    assert unsecure.public_key == bob.public_key
-    assert unsecure.timestamp == now
-    assert unsecure.author == alice.device_id
-    assert unsecure.profile == UserProfile.ADMIN
-
-    verified = UserCertificate.verify_and_load(
-        certif, author_verify_key=alice.verify_key, expected_author=alice.device_id
-    )
-    assert verified == unsecure
-
-    with pytest.raises(DataError) as exc:
-        UserCertificate.verify_and_load(
-            certif, author_verify_key=alice.verify_key, expected_author=mallory.device_id
-        )
-    assert str(exc.value) == "Invalid author: expected `mallory@dev1`, got `alice@dev1`"
-
-    with pytest.raises(DataError) as exc:
-        UserCertificate.verify_and_load(
-            certif, author_verify_key=mallory.verify_key, expected_author=alice.device_id
-        )
-    assert str(exc.value) == "Invalid signature"
-
-    with pytest.raises(DataError) as exc:
-        UserCertificate.verify_and_load(
-            certif,
-            author_verify_key=alice.verify_key,
-            expected_author=alice.device_id,
-            expected_user=mallory.user_id,
-        )
-    assert str(exc.value) == "Invalid user ID: expected `mallory`, got `bob`"
-
-
-def test_user_certificate_supports_legacy_is_admin_field(alice, bob):
-    now = DateTime.now()
-    certif = UserCertificate(
-        author=bob.device_id,
-        timestamp=now,
-        user_id=alice.user_id,
-        human_handle=None,
-        public_key=alice.public_key,
-        profile=alice.profile,
-    )
-
-    # Manually craft a certificate in legacy format
-    raw_legacy_certif = {
-        "type": "user_certificate",
-        "author": bob.device_id.str,
-        "timestamp": now,
-        "user_id": alice.user_id.str,
-        "public_key": alice.public_key.encode(),
-        "is_admin": True,
-    }
-    dumped_legacy_certif = bob.signing_key.sign(zlib.compress(packb(raw_legacy_certif)))
-
-    # Make sure the legacy format can be loaded
-    legacy_certif = UserCertificate.verify_and_load(
-        dumped_legacy_certif,
-        author_verify_key=bob.verify_key,
-        expected_author=bob.device_id,
-        expected_user=alice.user_id,
-        expected_human_handle=None,
-    )
-    assert legacy_certif == certif
-
-    # Manually decode new format to check it is compatible with legacy
-    dumped_certif = certif.dump_and_sign(bob.signing_key)
-    raw_certif = unpackb(zlib.decompress(bob.verify_key.verify(dumped_certif)))
-    assert raw_certif == {**raw_legacy_certif, "profile": alice.profile.str, "human_handle": None}
-
-
-def test_build_device_certificate(alice, bob, mallory):
-    now = DateTime.now()
-    certif = DeviceCertificate(
-        author=alice.device_id,
-        timestamp=now,
-        device_id=bob.device_id,
-        device_label=bob.device_label,
-        verify_key=bob.verify_key,
-    ).dump_and_sign(alice.signing_key)
-    assert isinstance(certif, bytes)
-
-    unsecure = DeviceCertificate.unsecure_load(certif)
-    assert isinstance(unsecure, DeviceCertificate)
-    assert unsecure.device_id == bob.device_id
-    assert unsecure.verify_key == bob.verify_key
-    assert unsecure.timestamp == now
-    assert unsecure.author == alice.device_id
-
-    verified = DeviceCertificate.verify_and_load(
-        certif, author_verify_key=alice.verify_key, expected_author=alice.device_id
-    )
-    assert verified == unsecure
-
-    with pytest.raises(DataError) as exc:
-        DeviceCertificate.verify_and_load(
-            certif, author_verify_key=alice.verify_key, expected_author=mallory.device_id
-        )
-    assert str(exc.value) == "Invalid author: expected `mallory@dev1`, got `alice@dev1`"
-
-    with pytest.raises(DataError) as exc:
-        DeviceCertificate.verify_and_load(
-            certif, author_verify_key=mallory.verify_key, expected_author=alice.device_id
-        )
-    assert str(exc.value) == "Invalid signature"
-
-    with pytest.raises(DataError) as exc:
-        DeviceCertificate.verify_and_load(
-            certif,
-            author_verify_key=alice.verify_key,
-            expected_author=alice.device_id,
-            expected_device=mallory.device_id,
-        )
-    assert str(exc.value) == "Invalid device ID: expected `mallory@dev1`, got `bob@dev1`"
-
-
-def test_build_revoked_user_certificate(alice, bob, mallory):
-    now = DateTime.now()
-    certif = RevokedUserCertificate(
-        author=alice.device_id, timestamp=now, user_id=bob.user_id
-    ).dump_and_sign(alice.signing_key)
-    assert isinstance(certif, bytes)
-
-    unsecure = RevokedUserCertificate.unsecure_load(certif)
-    assert isinstance(unsecure, RevokedUserCertificate)
-    assert unsecure.user_id == bob.user_id
-    assert unsecure.timestamp == now
-    assert unsecure.author == alice.device_id
-
-    verified = RevokedUserCertificate.verify_and_load(
-        certif, author_verify_key=alice.verify_key, expected_author=alice.device_id
-    )
-    assert verified == unsecure
-
-    with pytest.raises(DataError) as exc:
-        RevokedUserCertificate.verify_and_load(
-            certif, author_verify_key=alice.verify_key, expected_author=mallory.device_id
-        )
-    assert str(exc.value) == "Invalid author: expected `mallory@dev1`, got `alice@dev1`"
-
-    with pytest.raises(DataError) as exc:
-        RevokedUserCertificate.verify_and_load(
-            certif, author_verify_key=mallory.verify_key, expected_author=alice.device_id
-        )
-    assert str(exc.value) == "Invalid signature"
-
-    with pytest.raises(DataError) as exc:
-        RevokedUserCertificate.verify_and_load(
-            certif,
-            author_verify_key=alice.verify_key,
-            expected_author=alice.device_id,
-            expected_user=mallory.user_id,
-        )
-    assert str(exc.value) == "Invalid user ID: expected `mallory`, got `bob`"
diff --git a/server/tests/api/test_handshake.py b/server/tests/api/test_handshake.py
deleted file mode 100644
index 0cf4437adf7..00000000000
--- a/server/tests/api/test_handshake.py
+++ /dev/null
@@ -1,444 +0,0 @@
-# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS
-from __future__ import annotations
-
-from unittest.mock import ANY
-from uuid import uuid4
-
-import pytest
-
-from parsec._parsec import ApiVersion, DateTime
-from parsec.api.protocol import (
-    AuthenticatedClientHandshake,
-    BaseClientHandshake,
-    HandshakeBadAdministrationToken,
-    HandshakeBadIdentity,
-    HandshakeFailedChallenge,
-    HandshakeOrganizationExpired,
-    HandshakeRevokedDevice,
-    HandshakeRVKMismatch,
-    HandshakeType,
-    IncompatibleAPIVersionsError,
-    InvalidMessageError,
-    InvitationToken,
-    InvitationType,
-    InvitedClientHandshake,
-    OrganizationID,
-    ServerHandshake,
-    packb,
-    unpackb,
-)
-from parsec.api.protocol.handshake import answer_serializer
-from parsec.utils import BALLPARK_CLIENT_EARLY_OFFSET, BALLPARK_CLIENT_LATE_OFFSET
-
-API_VERSION = ApiVersion.API_V3_VERSION
-
-
-def test_good_authenticated_handshake(alice):
-    sh = ServerHandshake()
-
-    ch = AuthenticatedClientHandshake(
-        alice.organization_id, alice.device_id, alice.signing_key, alice.root_verify_key
-    )
-    assert sh.state == "stalled"
-
-    challenge_req = sh.build_challenge_req()
-    assert sh.state == "challenge"
-
-    answer_req = ch.process_challenge_req(challenge_req)
-
-    sh.process_answer_req(answer_req)
-    assert sh.state == "answer"
-    assert sh.answer_type == HandshakeType.AUTHENTICATED
-    assert sh.answer_data == {
-        "answer": ANY,
-        "client_api_version": API_VERSION,
-        "organization_id": alice.organization_id,
-        "device_id": alice.device_id,
-        "rvk": alice.root_verify_key,
-    }
-    result_req = sh.build_result_req(alice.verify_key)
-    assert sh.state == "result"
-
-    ch.process_result_req(result_req)
-    assert sh.client_api_version == API_VERSION
-
-
-@pytest.mark.parametrize("invitation_type", (InvitationType.USER, InvitationType.DEVICE))
-def test_good_invited_handshake(coolorg, invitation_type):
-    organization_id = OrganizationID("Org")
-    token = InvitationToken.new()
-
-    sh = ServerHandshake()
-    ch = InvitedClientHandshake(
-        organization_id=organization_id, invitation_type=invitation_type, token=token
-    )
-    assert sh.state == "stalled"
-
-    challenge_req = sh.build_challenge_req()
-    assert sh.state == "challenge"
-
-    answer_req = ch.process_challenge_req(challenge_req)
-
-    sh.process_answer_req(answer_req)
-    assert sh.state == "answer"
-    assert sh.answer_type == HandshakeType.INVITED
-    assert sh.answer_data == {
-        "client_api_version": API_VERSION,
-        "organization_id": organization_id,
-        "invitation_type": invitation_type,
-        "token": token,
-    }
-
-    result_req = sh.build_result_req()
-    assert sh.state == "result"
-
-    ch.process_result_req(result_req)
-    assert sh.client_api_version == API_VERSION
-
-
-# 1) Server build challenge (nothing more to test...)
-
-
-# 2) Client process challenge
-
-
-@pytest.mark.parametrize(
-    "req",
-    [
-        {},
-        {"handshake": "foo", "challenge": b"1234567890", "supported_api_versions": [(3, 2)]},
-        {"handshake": "challenge", "challenge": b"1234567890"},
-        {"challenge": b"1234567890"},
-        {"challenge": b"1234567890", "supported_api_versions": [(3, 2)]},
-        {"handshake": "challenge", "challenge": None},
-        {"handshake": "challenge", "challenge": None, "supported_api_versions": [(3, 2)]},
-        {"handshake": "challenge", "challenge": 42, "supported_api_versions": [(3, 2)]},
-        {"handshake": "challenge", "challenge": b"1234567890"},
-        {"handshake": "challenge", "challenge": b"1234567890", "supported_api_versions": "invalid"},
-    ],
-)
-def test_process_challenge_req_bad_format(alice, req):
-    ch = AuthenticatedClientHandshake(
-        alice.organization_id, alice.device_id, alice.signing_key, alice.root_verify_key
-    )
-    with pytest.raises(InvalidMessageError):
-        ch.process_challenge_req(packb(req))
-
-
-# 2-b) Client check API version
-
-
-@pytest.mark.parametrize(
-    "client_version, backend_version, valid",
-    [
-        ((2, 22), (1, 0), False),
-        ((2, 22), (1, 111), False),
-        ((2, 22), (2, 0), True),
-        ((2, 22), (2, 22), True),
-        ((2, 22), (2, 222), True),
-        ((2, 22), (3, 0), False),
-        ((2, 22), (3, 33), False),
-        ((2, 22), (3, 333), False),
-    ],
-    ids=str,
-)
-def test_process_challenge_req_good_api_version(
-    alice, monkeypatch, client_version, backend_version, valid
-):
-    # Cast parameters
-    client_version = ApiVersion(*client_version)
-    backend_version = ApiVersion(*backend_version)
-
-    ch = AuthenticatedClientHandshake(
-        alice.organization_id, alice.device_id, alice.signing_key, alice.root_verify_key
-    )
-    req = {
-        "handshake": "challenge",
-        "challenge": b"1234567890",
-        "supported_api_versions": [(backend_version.version, backend_version.revision)],
-        "backend_timestamp": DateTime.now(),
-        "ballpark_client_early_offset": BALLPARK_CLIENT_EARLY_OFFSET,
-        "ballpark_client_late_offset": BALLPARK_CLIENT_LATE_OFFSET,
-    }
-    monkeypatch.setattr(ch, "SUPPORTED_API_VERSIONS", [client_version])
-
-    if not valid:
-        # Invalid versioning
-        with pytest.raises(IncompatibleAPIVersionsError) as context:
-            ch.process_challenge_req(packb(req))
-        assert context.value.client_versions == [client_version]
-        assert context.value.backend_versions == [backend_version]
-
-    else:
-        # Valid versioning
-        ch.process_challenge_req(packb(req))
-        assert ch.challenge_data["supported_api_versions"] == [backend_version]
-        assert ch.backend_api_version == backend_version
-        assert ch.client_api_version == client_version
-
-
-@pytest.mark.parametrize(
-    "client_versions, backend_versions, expected_client_version, expected_backend_version",
-    [
-        ([(2, 22), (3, 33)], [(0, 000), (1, 111)], None, None),
-        ([(2, 22), (3, 33)], [(1, 111), (2, 222)], (2, 22), (2, 222)),
-        ([(2, 22), (3, 33)], [(2, 222), (3, 333)], (3, 33), (3, 333)),
-        ([(2, 22), (3, 33)], [(3, 333), (4, 444)], (3, 33), (3, 333)),
-        ([(2, 22), (3, 33)], [(4, 444), (5, 555)], None, None),
-        ([(2, 22), (4, 44)], [(1, 111), (2, 222)], (2, 22), (2, 222)),
-        ([(2, 22), (4, 44)], [(1, 111), (3, 333)], None, None),
-        ([(2, 22), (4, 44)], [(2, 222), (3, 333)], (2, 22), (2, 222)),
-        ([(2, 22), (4, 44)], [(2, 222), (4, 444)], (4, 44), (4, 444)),
-        ([(2, 22), (4, 44)], [(3, 333), (4, 444)], (4, 44), (4, 444)),
-        ([(2, 22), (4, 44)], [(3, 333), (5, 555)], None, None),
-        ([(2, 22), (4, 44)], [(4, 444), (5, 555)], (4, 44), (4, 444)),
-        ([(2, 22), (4, 44)], [(4, 444), (6, 666)], (4, 44), (4, 444)),
-        ([(2, 22), (4, 44)], [(5, 555), (6, 666)], None, None),
-    ],
-    ids=str,
-)
-def test_process_challenge_req_good_multiple_api_version(
-    alice,
-    monkeypatch,
-    client_versions,
-    backend_versions,
-    expected_client_version,
-    expected_backend_version,
-):
-    # Cast parameters
-    client_versions = [ApiVersion(*args) for args in client_versions]
-    backend_versions = [ApiVersion(*args) for args in backend_versions]
-    if expected_client_version:
-        expected_client_version = ApiVersion(*expected_client_version)
-    if expected_backend_version:
-        expected_backend_version = ApiVersion(*expected_backend_version)
-
-    ch = AuthenticatedClientHandshake(
-        alice.organization_id, alice.device_id, alice.signing_key, alice.root_verify_key
-    )
-    req = {
-        "handshake": "challenge",
-        "challenge": b"1234567890",
-        "supported_api_versions": [(x.version, x.revision) for x in backend_versions],
-        "backend_timestamp": DateTime.now(),
-        "ballpark_client_early_offset": BALLPARK_CLIENT_EARLY_OFFSET,
-        "ballpark_client_late_offset": BALLPARK_CLIENT_LATE_OFFSET,
-    }
-    monkeypatch.setattr(ch, "SUPPORTED_API_VERSIONS", client_versions)
-
-    if expected_client_version is None:
-        # Invalid versioning
-        with pytest.raises(IncompatibleAPIVersionsError) as context:
-            ch.process_challenge_req(packb(req))
-        assert context.value.client_versions == client_versions
-        assert context.value.backend_versions == backend_versions
-
-    else:
-        # Valid versioning
-        ch.process_challenge_req(packb(req))
-        assert ch.challenge_data["supported_api_versions"] == list(backend_versions)
-        assert ch.backend_api_version == expected_backend_version
-        assert ch.client_api_version == expected_client_version
-
-
-# 3) Server process answer
-
-
-@pytest.mark.parametrize(
-    "req",
-    [
-        {},
-        {"handshake": "answer", "type": "dummy"},  # Invalid type
-        # Authenticated answer
-        {
-            "handshake": "answer",
-            "type": HandshakeType.AUTHENTICATED.value,
-            "organization_id": "",
-            "device_id": "",
-            # Missing rvk
-            "answer": b"good answer",
-        },
-        {
-            "handshake": "answer",
-            "type": HandshakeType.AUTHENTICATED.value,
-            "organization_id": "",
-            # Missing device_id
-            "rvk": "",
-            "answer": b"good answer",
-        },
-        {
-            "handshake": "answer",
-            "type": HandshakeType.AUTHENTICATED.value,
-            "organization_id": "",
-            "device_id": "",
-            "rvk": "",
-            # Missing answer
-        },
-        {
-            "handshake": "answer",
-            "type": HandshakeType.AUTHENTICATED.value,
-            "organization_id": "",
-            "device_id": "",
-            "rvk": "",
-            "answer": 42,  # Bad type
-        },
-        {
-            "handshake": "answer",
-            "type": HandshakeType.AUTHENTICATED.value,
-            "organization_id": "",
-            "device_id": "dummy",  # Invalid DeviceID
-            "rvk": "",
-            "answer": b"good answer",
-        },
-        {
-            "handshake": "answer",
-            "type": HandshakeType.AUTHENTICATED.value,
-            "organization_id": "",
-            "device_id": "",
-            "rvk": b"dummy",  # Invalid VerifyKey
-            "answer": b"good answer",
-        },
-        # Invited answer
-        {
-            "handshake": "answer",
-            "type": HandshakeType.INVITED.value,
-            "invitation_type": InvitationType.USER.str,
-            "organization_id": "d@mmy",  # Invalid OrganizationID
-            "token": "",
-        },
-        {
-            "handshake": "answer",
-            "type": HandshakeType.INVITED.value,
-            "invitation_type": "dummy",  # Invalid invitation_type
-            "organization_id": "",
-            "token": "",
-        },
-        {
-            "handshake": "answer",
-            "type": HandshakeType.INVITED.value,
-            "invitation_type": InvitationType.USER.str,
-            "organization_id": "",
-            "token": "abc123",  # Invalid token type
-        },
-    ],
-)
-def test_process_answer_req_bad_format(req, alice):
-    for key, good_value in [
-        ("organization_id", alice.organization_id.str),
-        ("device_id", alice.device_id.str),
-        ("rvk", alice.root_verify_key.encode()),
-        ("token", uuid4()),
-    ]:
-        if req.get(key) == "":
-            req[key] = good_value
-    req["client_api_version"] = (API_VERSION.version, API_VERSION.revision)
-    sh = ServerHandshake()
-    sh.build_challenge_req()
-    with pytest.raises(InvalidMessageError):
-        sh.process_answer_req(packb(req))
-
-
-# 4) Server build result
-
-
-def test_build_result_req_bad_key(alice, bob):
-    sh = ServerHandshake()
-    sh.build_challenge_req()
-    answer = {
-        "handshake": "answer",
-        "type": HandshakeType.AUTHENTICATED.value,
-        "client_api_version": (API_VERSION.version, API_VERSION.revision),
-        "organization_id": alice.organization_id.str,
-        "device_id": alice.device_id.str,
-        "rvk": alice.root_verify_key.encode(),
-        "answer": alice.signing_key.sign(answer_serializer.dumps({"answer": sh.challenge})),
-    }
-    sh.process_answer_req(packb(answer))
-    with pytest.raises(HandshakeFailedChallenge):
-        sh.build_result_req(bob.verify_key)
-
-
-def test_build_result_req_bad_challenge(alice):
-    sh = ServerHandshake()
-    sh.build_challenge_req()
-    answer = {
-        "handshake": "answer",
-        "type": HandshakeType.AUTHENTICATED.value,
-        "client_api_version": (API_VERSION.version, API_VERSION.revision),
-        "organization_id": alice.organization_id.str,
-        "device_id": alice.device_id.str,
-        "rvk": alice.root_verify_key.encode(),
-        "answer": alice.signing_key.sign(
-            answer_serializer.dumps({"answer": sh.challenge + b"-dummy"})
-        ),
-    }
-    sh.process_answer_req(packb(answer))
-    with pytest.raises(HandshakeFailedChallenge):
-        sh.build_result_req(alice.verify_key)
-
-
-@pytest.mark.parametrize(
-    "method,expected_result",
-    [
-        ("build_bad_protocol_result_req", "bad_protocol"),
-        ("build_bad_identity_result_req", "bad_identity"),
-        ("build_organization_expired_result_req", "organization_expired"),
-        ("build_rvk_mismatch_result_req", "rvk_mismatch"),
-        ("build_revoked_device_result_req", "revoked_device"),
-        ("build_bad_administration_token_result_req", "bad_admin_token"),
-    ],
-)
-def test_build_bad_outcomes(alice, method, expected_result):
-    sh = ServerHandshake()
-    sh.build_challenge_req()
-    answer = {
-        "handshake": "answer",
-        "type": HandshakeType.AUTHENTICATED.value,
-        "client_api_version": (API_VERSION.version, API_VERSION.revision),
-        "organization_id": alice.organization_id.str,
-        "device_id": alice.device_id.str,
-        "rvk": alice.root_verify_key.encode(),
-        "answer": alice.signing_key.sign(answer_serializer.dumps({"answer": sh.challenge})),
-    }
-    sh.process_answer_req(packb(answer))
-    req = getattr(sh, method)()
-    assert unpackb(req) == {"handshake": "result", "result": expected_result, "help": ANY}
-
-
-# 5) Client process result
-
-
-@pytest.mark.parametrize(
-    "req",
-    [
-        {},
-        {"handshake": "foo", "result": "ok"},
-        {"result": "ok"},
-        {"handshake": "result", "result": "error"},
-    ],
-)
-def test_process_result_req_bad_format(req):
-    ch = BaseClientHandshake()
-    with pytest.raises(InvalidMessageError):
-        ch.process_result_req(packb(req))
-
-
-@pytest.mark.parametrize(
-    "result,exc_cls",
-    [
-        ("bad_identity", HandshakeBadIdentity),
-        ("organization_expired", HandshakeOrganizationExpired),
-        ("rvk_mismatch", HandshakeRVKMismatch),
-        ("revoked_device", HandshakeRevokedDevice),
-        ("bad_admin_token", HandshakeBadAdministrationToken),
-        ("dummy", InvalidMessageError),
-    ],
-)
-def test_process_result_req_bad_outcome(result, exc_cls):
-    ch = BaseClientHandshake()
-    with pytest.raises(exc_cls):
-        ch.process_result_req(packb({"handshake": "result", "result": result}))
-
-
-# TODO: test with revoked device
-# TODO: test with user with all devices revoked
diff --git a/server/tests/api/test_transport.py b/server/tests/api/test_transport.py
deleted file mode 100644
index 4fce557626b..00000000000
--- a/server/tests/api/test_transport.py
+++ /dev/null
@@ -1,122 +0,0 @@
-# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS
-from __future__ import annotations
-
-from functools import partial
-
-import pytest
-import trio
-
-from parsec.api.protocol.base import MsgpackSerializer
-from parsec.api.transport import Transport, TransportClosedByPeer
-from parsec.serde import BaseSchema, fields
-
-
-async def serve_tcp_testbed(*conns):
-    host = "127.0.0.1"
-    send_channel, receive_channel = trio.open_memory_channel(0)
-
-    async def _serve_client(stream):
-        server_fn = await receive_channel.receive()
-        transport = await Transport.init_for_server(stream)
-        await server_fn(transport)
-
-    async def _store_handlers(*, task_status=trio.TASK_STATUS_IGNORED):
-        async with trio.open_service_nursery() as handler_nursery:
-            task_status.started(handler_nursery)
-            await trio.sleep_forever()
-
-    async with trio.open_service_nursery() as nursery:
-        handler_nursery = await nursery.start(_store_handlers)
-        listeners = await nursery.start(
-            partial(
-                trio.serve_tcp,
-                _serve_client,
-                port=0,
-                host="127.0.0.1",
-                handler_nursery=handler_nursery,
-            )
-        )
-        _, port, *_ = listeners[0].socket.getsockname()
-        assert not handler_nursery.child_tasks
-
-        for client_fn, server_fn in conns:
-            stream = await trio.open_tcp_stream(host, port)
-            await send_channel.send(server_fn)
-
-            transport = await Transport.init_for_client(stream, host)
-            await client_fn(transport)
-
-            await trio.testing.wait_all_tasks_blocked()
-            # No pending connections should remain
-            assert not handler_nursery.child_tasks
-
-        await send_channel.aclose()
-        nursery.cancel_scope.cancel()
-
-
-@pytest.mark.trio
-@pytest.mark.parametrize("closing_end", ["client", "server"])
-async def test_no_transport_leaks_one_end_close(closing_end):
-    async def closing_end_fn(transport):
-        with pytest.raises(TransportClosedByPeer):
-            await transport.recv()
-        await transport.aclose()
-
-    async def listening_end_fn(transport):
-        await transport.aclose()
-
-    if closing_end == "server":
-        await serve_tcp_testbed((listening_end_fn, closing_end_fn))
-    else:
-        await serve_tcp_testbed((closing_end_fn, listening_end_fn))
-
-
-# TODO: basically a benchmark to showcase the performances issues with
-# marshmallow/json serialization
-@pytest.mark.slow
-@pytest.mark.trio
-async def test_big_buffer_bench(backend_addr):
-    server_stream, client_stream = trio.testing.memory_stream_pair()
-
-    client_transport = None
-    server_transport = None
-
-    async def _boot_server():
-        nonlocal server_transport
-        server_transport = await Transport.init_for_server(server_stream)
-
-    async def _boot_client():
-        nonlocal client_transport
-        client_transport = await Transport.init_for_client(
-            client_stream, host=backend_addr.hostname
-        )
-
-    async with trio.open_service_nursery() as nursery:
-        nursery.start_soon(_boot_client)
-        nursery.start_soon(_boot_server)
-
-    class Schema(BaseSchema):
-        data = fields.Bytes()
-
-    schema = MsgpackSerializer(Schema)
-
-    # Base64 encoding of the bytes make the payload bigger once serialized
-    # roughly_max = int(TCPTransport.MAX_MSG_SIZE * 2 / 3)
-    roughly_max = int(Transport.RECEIVE_BYTES * 2 / 3)
-    payload = {"data": b"x" * roughly_max}
-
-    for _ in range(10):
-        await client_transport.send(schema.dumps(payload))
-        raw = await server_transport.recv()
-        server_payload = schema.loads(raw)
-        assert server_payload == payload
-        del raw
-
-        await server_transport.send(schema.dumps(server_payload))
-        raw = await client_transport.recv()
-        roundtrip_payload = schema.loads(raw)
-        assert roundtrip_payload == payload
-        del raw
-
-
-# TODO: test websocket can work with message sent across mutiple TCP frames
diff --git a/server/tests/api/test_types.py b/server/tests/api/test_types.py
deleted file mode 100644
index 4612a818ac3..00000000000
--- a/server/tests/api/test_types.py
+++ /dev/null
@@ -1,274 +0,0 @@
-# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS
-from __future__ import annotations
-
-import zlib
-from unicodedata import normalize
-
-import pytest
-
-from parsec._parsec import (
-    DataError,
-    DeviceID,
-    DeviceName,
-    EntryName,
-    EntryNameError,
-    HumanHandle,
-    OrganizationID,
-    SecretKey,
-    UserID,
-)
-from parsec._parsec import (
-    FileManifest as RemoteFileManifest,
-)
-from parsec._parsec import (
-    FolderManifest as RemoteFolderManifest,
-)
-from parsec._parsec import (
-    UserManifest as RemoteUserManifest,
-)
-from parsec._parsec import (
-    WorkspaceManifest as RemoteWorkspaceManifest,
-)
-from parsec.serde import packb
-from tests.common import LocalDevice
-
-
-@pytest.mark.parametrize("cls", (UserID, DeviceName, OrganizationID))
-@pytest.mark.parametrize(
-    "data",
-    (
-        "!x",  # Invalid character
-        " x",  # Invalid character
-        "x" * 33,  # Too long
-        # Sinogramme encoded on 3 bytes with utf8, so those 11 characters
-        # form a 33 bytes long utf8 string !
-        "飞" * 11,
-        "😀",  # Not a unicode word
-        "",
-    ),
-)
-def test_max_bytes_size(cls, data):
-    with pytest.raises(ValueError):
-        cls(data)
-
-
-@pytest.mark.parametrize("cls", (UserID, DeviceName, OrganizationID))
-def test_normalization(cls):
-    nfc_str = normalize("NFC", "àæßšūÿź")  # cspell: disable-line
-    nfd_str = normalize("NFD", nfc_str)
-
-    assert nfc_str != nfd_str
-    assert cls(nfd_str).str == nfc_str
-    assert cls(nfc_str).str == nfc_str
-    assert cls(nfc_str + nfd_str).str == nfc_str + nfc_str
-
-
-@pytest.mark.parametrize("cls", (UserID, DeviceName, OrganizationID))
-@pytest.mark.parametrize(
-    "data", ("x", "x" * 32, "飞" * 10 + "xx", "X1-_é飞")  # 32 bytes long utf8 string  # Mix-and-match
-)
-def test_good_pattern(cls, data):
-    cls(data)
-
-
-@pytest.mark.parametrize(
-    "data",
-    (
-        "!x@x",  # Invalid character
-        "x@ ",  # Invalid character
-        "x" * 66,  # Too long
-        # Sinogramme encoded on 3 bytes with utf8, so those 22 characters
-        # form a 66 bytes long utf8 string !
-        "飞" * 22,
-        "😀@x",  # Not a unicode word
-        "x",  # Missing @ separator
-        "@x",
-        "x@",
-        "x" * 62 + "@x",  # Respect overall length but not UserID length
-        "x@" + "x" * 62,  # Respect overall length but not DeviceName length
-        "",
-    ),
-)
-def test_max_bytes_size_device_id(data):
-    with pytest.raises(ValueError):
-        DeviceID(data)
-
-
-@pytest.mark.parametrize(
-    "data",
-    (
-        "x@x",
-        "x" * 32 + "@" + "x" * 32,
-        "飞" * 10 + "xx@xx" + "飞" * 10,  # 65 bytes long utf8 string
-        "X1-_é飞@X1-_é飞",  # Mix-and-match
-    ),
-)
-def test_good_pattern_device_id(data):
-    DeviceID(data)
-
-
-def test_human_handle_compare():
-    a = HumanHandle(email="alice@example.com", label="Alice")
-    a2 = HumanHandle(email="alice@example.com", label="Whatever")
-    b = HumanHandle(email="bob@example.com", label="Bob")
-    assert a == a2
-    assert a != b
-    assert b == b
-
-
-@pytest.mark.parametrize(
-    "email,label",
-    (
-        ("alice@example.com", "Alice"),
-        ("a@x", "A"),  # Smallest size
-        (f"{'a' * 64}@{'x' * 185}.com", "x" * 254),  # Max sizes
-        (f"{'飞' * 21}@{'飞' * 62}.com", f"{'飞' * 84}xx"),  # Unicode & max size
-        ("john.doe@example.com", "J.D."),
-    ),
-)
-def test_valid_human_handle(email, label):
-    HumanHandle(email, label)
-
-
-@pytest.mark.parametrize(
-    "email,label",
-    (
-        ("alice@example.com", "x" * 255),
-        (f"{'@example.com':a>255}", "Alice"),
-        ("alice@example.com", "飞" * 85),  # 255 bytes long utf8 label
-        (f"{'飞' * 21}@{'飞' * 63}.x", "Alice"),  # 255 bytes long utf8 email
-        ("alice@example.com", ""),  # Empty label
-        ("", "Alice"),  # Empty email
-        ("", "Alice <alice@example.com>"),  # Empty email and misleading label
-        ("Alice <alice@example.com>", ""),  # Empty label and misleading label
-        ("Alice <@example.com>", "Alice"),  # Missing local part in email
-    ),
-)
-def test_invalid_human_handle(email, label):
-    with pytest.raises(ValueError):
-        HumanHandle(email, label)
-
-
-def test_human_handle_normalization():
-    nfc_label = normalize("NFC", "àæßšūÿź")  # cspell: disable-line
-    nfd_label = normalize("NFD", nfc_label)
-    nfc_email = normalize("NFC", "àæßš@ūÿ.ź")  # cspell: disable-line
-    nfd_email = normalize("NFD", nfc_email)
-    assert nfc_label != nfd_label
-    assert nfc_email != nfd_email
-
-    hh = HumanHandle(nfd_email, nfd_label)
-    assert hh.email == nfc_email
-    assert hh.label == nfc_label
-
-    hh = HumanHandle(nfc_email, nfc_label)
-    assert hh.email == nfc_email
-    assert hh.label == nfc_label
-
-
-@pytest.mark.parametrize(
-    "data",
-    (
-        "foo",
-        "foo.txt",
-        "x" * 255,  # Max size
-        "飞" * 85,  # Unicode & max size
-        "X1-_é飞",
-        "🌍☄️==🦕🦖💀",  # Probably a bad name for a real folder...
-        ".a",  # Dot and dot-dot are allowed if they are not alone
-        "..a",
-        "a..",
-        "a.",
-    ),
-)
-def test_valid_entry_name(data):
-    EntryName(data)
-
-
-@pytest.mark.parametrize("data", ("x" * 256, "飞" * 85 + "x"))
-def test_entry_name_too_long(data):
-    with pytest.raises(EntryNameError):
-        EntryName(data)
-
-
-@pytest.mark.parametrize(
-    "data",
-    (
-        ".",  # Not allowed
-        "..",  # Not allowed
-        "/x",  # Slash not allowed
-        "x/x",
-        "x/",
-        "/",
-        "\x00x",  # Null-byte not allowed
-        "x\x00x",
-        "x\x00",
-        "\x00",
-    ),
-)
-def test_invalid_entry_name(data):
-    with pytest.raises(ValueError):
-        EntryName(data)
-
-
-def test_entry_name_normalization():
-    nfc_str = normalize(
-        "NFC", "àáâäæãåāçćčèéêëēėęîïíīįìłñńôöòóœøōõßśšûüùúūÿžźż"  # cspell: disable-line
-    )
-    nfd_str = normalize("NFD", nfc_str)
-
-    assert nfc_str != nfd_str
-    assert EntryName(nfd_str).str == nfc_str
-    assert EntryName(nfc_str).str == nfc_str
-    assert EntryName(nfc_str + nfd_str).str == nfc_str + nfc_str
-
-
-def test_remote_manifests_load_invalid_data(alice: LocalDevice):
-    key = SecretKey.generate()
-    valid_zip_msgpack_but_bad_fields = zlib.compress(packb({"foo": 42}))
-    valid_zip_bud_bad_msgpack = zlib.compress(b"dummy")
-    invalid_zip = b"\x42" * 10
-
-    for cls in (
-        RemoteFileManifest,
-        RemoteFolderManifest,
-        RemoteWorkspaceManifest,
-        RemoteUserManifest,
-    ):
-        print(f"Testing class {cls.__name__}")
-        with pytest.raises(DataError):
-            cls.decrypt_verify_and_load(
-                b"",
-                key=key,
-                author_verify_key=alice.verify_key,
-                expected_author=alice.device_id,
-                expected_timestamp=alice.timestamp(),
-            )
-
-        with pytest.raises(DataError):
-            cls.decrypt_verify_and_load(
-                invalid_zip,
-                key=key,
-                author_verify_key=alice.verify_key,
-                expected_author=alice.device_id,
-                expected_timestamp=alice.timestamp(),
-            )
-
-        with pytest.raises(DataError):
-            cls.decrypt_verify_and_load(
-                valid_zip_bud_bad_msgpack,
-                key=key,
-                author_verify_key=alice.verify_key,
-                expected_author=alice.device_id,
-                expected_timestamp=alice.timestamp(),
-            )
-
-        # Valid to deserialize, invalid fields
-        with pytest.raises(DataError):
-            cls.decrypt_verify_and_load(
-                valid_zip_msgpack_but_bad_fields,
-                key=key,
-                author_verify_key=alice.verify_key,
-                expected_author=alice.device_id,
-                expected_timestamp=alice.timestamp(),
-            )
diff --git a/server/tests/backend/__init__.py b/server/tests/backend/__init__.py
deleted file mode 100644
index 05e02a3b569..00000000000
--- a/server/tests/backend/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS
diff --git a/server/tests/backend/common.py b/server/tests/backend/common.py
deleted file mode 100644
index d4367751b93..00000000000
--- a/server/tests/backend/common.py
+++ /dev/null
@@ -1,429 +0,0 @@
-# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS
-from __future__ import annotations
-
-from contextlib import asynccontextmanager
-
-import trio
-
-from parsec._parsec import DateTime, InvitationType
-from parsec.api.protocol import anonymous_cmds, authenticated_cmds, invited_cmds
-from tests.common import BaseRpcApiClient, real_clock_timeout
-
-
-def craft_http_request(
-    target: str, method: str, headers: dict, body: bytes | None, protocol: str = "1.0"
-) -> bytes:
-    if body is None:
-        body = b""
-    else:
-        assert isinstance(body, bytes)
-        headers = {**headers, "content-length": len(body)}
-
-    # Use HTTP 1.0 by default given 1.1 requires Host header
-    req = f"{method} {target} HTTP/{protocol}\r\n"
-    req += "\r\n".join(f"{key}: {value}" for key, value in headers.items())
-    while not req.endswith("\r\n\r\n"):
-        req += "\r\n"
-
-    return req.encode("ascii") + body
-
-
-def parse_http_response(raw: bytes):
-    head, _ = raw.split(b"\r\n\r\n", 1)  # Ignore the body part
-    status, *headers = head.split(b"\r\n")
-    protocol, status_code, status_label = status.split(b" ", 2)
-    assert protocol == b"HTTP/1.1"
-    cooked_status = (int(status_code.decode("ascii")), status_label.decode("ascii"))
-    cooked_headers = {}
-    for header in headers:
-        key, value = header.split(b": ")
-        cooked_headers[key.decode("ascii").lower()] = value.decode("ascii")
-    return cooked_status, cooked_headers
-
-
-async def do_http_request(
-    stream: trio.abc.Stream,
-    target: str | None = None,
-    method: str = "GET",
-    req: bytes | None = None,
-    headers: dict | None = None,
-    body: bytes | None = None,
-):
-    if req is None:
-        assert target is not None
-        req = craft_http_request(target, method, headers or {}, body)
-    else:
-        assert target is None
-        assert headers is None
-        assert body is None
-    await stream.send_all(req)
-
-    # In theory there is no guarantee `stream.receive_some()` outputs
-    # an entire HTTP request (it typically depends on the TCP stack and
-    # the network).
-    # However given we communicate only on the localhost loop, we can
-    # cross our fingers really hard and expect the http header part will come
-    # as a single trame.
-    rep = b""
-    while b"\r\n\r\n" not in rep:
-        part = await stream.receive_some()
-        if not part:
-            # Connection closed by peer
-            raise trio.BrokenResourceError
-        rep += part
-    status, rep_headers = parse_http_response(rep)
-    rep_content = rep.split(b"\r\n\r\n", 1)[1]
-    content_size = int(rep_headers.get("content-length", "0"))
-    if content_size:
-        while len(rep_content) < content_size:
-            rep_content += await stream.receive_some()
-        # No need to check for another request beeing put after the
-        # body in the buffer given we don't use keep alive
-        assert len(rep_content) == content_size
-    else:
-        # In case the current request is a connection upgrade to websocket, the
-        # server is allowed to start sending websocket messages right away that
-        # may end up as part of the TCP trame that contained the response
-        if b"Connection: Upgrade" not in rep:
-            assert rep_content == b""
-
-    return status, rep_headers, rep_content
-
-
-class CmdSock:
-    def __init__(self, cmd_module, parse_args=None, check_rep_by_default=False):
-        self.cmd_module = cmd_module
-        self.check_rep_by_default = check_rep_by_default
-        self.parse_args = parse_args
-
-    async def __call__(self, ws_or_rpc, *args, check_rep=None, **kwargs):
-        if self.parse_args:
-            kwargs = self.parse_args(*args, **kwargs)
-            req = self.cmd_module.Req(**kwargs)
-        else:
-            req = self.cmd_module.Req(*args, **kwargs)
-
-        if isinstance(ws_or_rpc, BaseRpcApiClient):
-            raw_rep = await ws_or_rpc.send(
-                req=req.dump(),
-            )
-            return self.cmd_module.Rep.load(raw_rep)
-
-        else:
-            raw_req = req.dump()
-            await ws_or_rpc.send(raw_req)
-            raw_rep = await ws_or_rpc.receive()
-            rep = self.cmd_module.Rep.load(raw_rep)
-            check_rep = check_rep if check_rep is not None else self.check_rep_by_default
-            if check_rep:
-                assert type(rep).__name__ == "RepOk"
-            return rep
-
-    class AsyncCallRepBox:
-        def __init__(self, do_recv):
-            self._do_recv = do_recv
-            self.rep_done = False
-            self._rep = None
-
-        @property
-        def rep(self):
-            assert self.rep_done
-            return self._rep
-
-        async def do_recv(self):
-            assert not self.rep_done
-            self.rep_done = True
-            self._rep = await self._do_recv()
-
-    @asynccontextmanager
-    async def async_call(self, sock, *args, check_rep=None, **kwargs):
-        if self.parse_args:
-            kwargs = self.parse_args(*args, **kwargs)
-            req = self.cmd_module.Req(**kwargs)
-        else:
-            req = self.cmd_module.Req(*args, **kwargs)
-
-        raw_req = req.dump()
-        await sock.send(raw_req)
-
-        check_rep = check_rep if check_rep is not None else self.check_rep_by_default
-
-        async def _do_rep():
-            raw_rep = await sock.receive()
-            rep = self.cmd_module.Rep.load(raw_rep)
-            if check_rep:
-                assert type(rep).__name__ == "RepOk"
-            return rep
-
-        box = self.AsyncCallRepBox(do_recv=_do_rep)
-        yield box
-
-        if not box.rep_done:
-            async with real_clock_timeout():
-                await box.do_recv()
-
-
-### Ping ###
-
-
-authenticated_ping = CmdSock(
-    authenticated_cmds.latest.ping,
-    parse_args=lambda ping="": {"ping": ping},
-    check_rep_by_default=True,
-)
-
-invited_ping = CmdSock(
-    invited_cmds.latest.ping,
-    check_rep_by_default=True,
-)
-
-
-### Organization ###
-
-
-organization_config = CmdSock(
-    authenticated_cmds.latest.organization_config, check_rep_by_default=True
-)
-
-
-organization_stats = CmdSock(
-    authenticated_cmds.latest.organization_stats, check_rep_by_default=True
-)
-
-
-organization_bootstrap = CmdSock(
-    anonymous_cmds.latest.organization_bootstrap,
-    check_rep_by_default=True,
-)
-
-
-### Block ###
-
-
-block_create = CmdSock(
-    authenticated_cmds.latest.block_create,
-    check_rep_by_default=True,
-)
-block_read = CmdSock(
-    authenticated_cmds.latest.block_read, parse_args=lambda block_id: {"block_id": block_id}
-)
-
-
-### Realm ###
-
-
-realm_create = CmdSock(
-    authenticated_cmds.latest.realm_create,
-)
-realm_status = CmdSock(
-    authenticated_cmds.latest.realm_status,
-)
-realm_stats = CmdSock(
-    authenticated_cmds.latest.realm_stats, parse_args=lambda realm_id: {"realm_id": realm_id}
-)
-realm_update_roles = CmdSock(
-    authenticated_cmds.latest.realm_update_roles,
-    parse_args=lambda role_certificate, recipient_message=None: {
-        "role_certificate": role_certificate,
-        "recipient_message": recipient_message,
-    },
-    check_rep_by_default=True,
-)
-realm_start_reencryption_maintenance = CmdSock(
-    authenticated_cmds.latest.realm_start_reencryption_maintenance,
-    check_rep_by_default=True,
-)
-realm_finish_reencryption_maintenance = CmdSock(
-    authenticated_cmds.latest.realm_finish_reencryption_maintenance,
-    check_rep_by_default=True,
-)
-
-
-### Vlob ###
-
-
-vlob_create = CmdSock(
-    authenticated_cmds.latest.vlob_create,
-    parse_args=lambda realm_id, vlob_id, blob, timestamp=None, encryption_revision=1, sequester_blob=None: {
-        "realm_id": realm_id,
-        "vlob_id": vlob_id,
-        "blob": blob,
-        "timestamp": timestamp or DateTime.now(),
-        "encryption_revision": encryption_revision,
-        "sequester_blob": sequester_blob,
-    },
-    check_rep_by_default=True,
-)
-vlob_read = CmdSock(
-    authenticated_cmds.latest.vlob_read,
-    parse_args=lambda vlob_id, version=None, timestamp=None, encryption_revision=1: {
-        "vlob_id": vlob_id,
-        "version": version,
-        "timestamp": timestamp,
-        "encryption_revision": encryption_revision,
-    },
-)
-vlob_update = CmdSock(
-    authenticated_cmds.latest.vlob_update,
-    parse_args=lambda vlob_id, version, blob, timestamp=None, encryption_revision=1, sequester_blob=None: {
-        "vlob_id": vlob_id,
-        "version": version,
-        "blob": blob,
-        "encryption_revision": encryption_revision,
-        "timestamp": timestamp or DateTime.now(),
-        "sequester_blob": sequester_blob,
-    },
-    check_rep_by_default=True,
-)
-vlob_list_versions = CmdSock(
-    authenticated_cmds.latest.vlob_list_versions,
-)
-vlob_poll_changes = CmdSock(
-    authenticated_cmds.latest.vlob_poll_changes,
-)
-vlob_maintenance_get_reencryption_batch = CmdSock(
-    authenticated_cmds.latest.vlob_maintenance_get_reencryption_batch,
-    parse_args=lambda realm_id, encryption_revision, size=100: {
-        "realm_id": realm_id,
-        "encryption_revision": encryption_revision,
-        "size": size,
-    },
-)
-vlob_maintenance_save_reencryption_batch = CmdSock(
-    authenticated_cmds.latest.vlob_maintenance_save_reencryption_batch,
-    check_rep_by_default=True,
-)
-
-
-### Events ###
-
-
-_events_listen = CmdSock(authenticated_cmds.latest.events_listen)
-
-
-@asynccontextmanager
-async def events_listen(sock):
-    async with _events_listen.async_call(sock) as box:
-        yield box
-
-
-# TODO: kept only to avoid import error, tests using it should migrate to regular `events_listen`
-async def apiv2v3_events_listen_nowait():
-    raise NotImplementedError
-
-
-async def apiv2v3_events_listen_wait():
-    raise NotImplementedError
-
-
-async def apiv2v3_events_subscribe():
-    raise NotImplementedError
-
-
-@asynccontextmanager
-async def apiv2v3_events_listen(sock):
-    raise NotImplementedError
-
-
-### Message ###
-
-
-message_get = CmdSock(
-    authenticated_cmds.latest.message_get, parse_args=lambda offset=0: {"offset": offset}
-)
-
-
-### User ###
-
-
-user_create = CmdSock(
-    authenticated_cmds.latest.user_create,
-    parse_args=lambda user_certificate, device_certificate, redacted_user_certificate, redacted_device_certificate: {
-        k: v
-        for k, v in {
-            "user_certificate": user_certificate,
-            "device_certificate": device_certificate,
-            "redacted_user_certificate": redacted_user_certificate,
-            "redacted_device_certificate": redacted_device_certificate,
-        }.items()
-        if v is not None
-    },
-)
-user_revoke = CmdSock(
-    authenticated_cmds.latest.user_revoke,
-)
-device_create = CmdSock(
-    authenticated_cmds.latest.device_create,
-)
-
-
-### Invite ###
-
-
-invite_new = CmdSock(
-    authenticated_cmds.latest.invite_new,
-    parse_args=lambda type, send_email=False, claimer_email=None: {
-        "unit": (
-            authenticated_cmds.latest.invite_new.UserOrDeviceUser(
-                claimer_email=claimer_email, send_email=send_email
-            )
-            if type == InvitationType.USER
-            else authenticated_cmds.latest.invite_new.UserOrDeviceDevice(send_email)
-        )
-    },
-)
-invite_list = CmdSock(authenticated_cmds.latest.invite_list)
-invite_delete = CmdSock(
-    authenticated_cmds.latest.invite_delete,
-)
-invite_info = CmdSock(invited_cmds.latest.invite_info)
-invite_1_claimer_wait_peer = CmdSock(
-    invited_cmds.latest.invite_1_claimer_wait_peer,
-)
-invite_1_greeter_wait_peer = CmdSock(
-    authenticated_cmds.latest.invite_1_greeter_wait_peer,
-)
-invite_2a_claimer_send_hashed_nonce = CmdSock(
-    invited_cmds.latest.invite_2a_claimer_send_hashed_nonce,
-)
-invite_2a_greeter_get_hashed_nonce = CmdSock(
-    authenticated_cmds.latest.invite_2a_greeter_get_hashed_nonce,
-)
-invite_2b_greeter_send_nonce = CmdSock(
-    authenticated_cmds.latest.invite_2b_greeter_send_nonce,
-)
-invite_2b_claimer_send_nonce = CmdSock(
-    invited_cmds.latest.invite_2b_claimer_send_nonce,
-)
-invite_3a_greeter_wait_peer_trust = CmdSock(
-    authenticated_cmds.latest.invite_3a_greeter_wait_peer_trust,
-)
-invite_3a_claimer_signify_trust = CmdSock(invited_cmds.latest.invite_3a_claimer_signify_trust)
-invite_3b_claimer_wait_peer_trust = CmdSock(invited_cmds.latest.invite_3b_claimer_wait_peer_trust)
-invite_3b_greeter_signify_trust = CmdSock(
-    authenticated_cmds.latest.invite_3b_greeter_signify_trust,
-)
-invite_4_greeter_communicate = CmdSock(
-    authenticated_cmds.latest.invite_4_greeter_communicate,
-)
-invite_4_claimer_communicate = CmdSock(
-    invited_cmds.latest.invite_4_claimer_communicate,
-)
-
-
-### PKI enrollment ###
-
-
-pki_enrollment_submit = CmdSock(
-    anonymous_cmds.latest.pki_enrollment_submit,
-)
-pki_enrollment_info = CmdSock(
-    anonymous_cmds.latest.pki_enrollment_info,
-)
-pki_enrollment_list = CmdSock(authenticated_cmds.latest.pki_enrollment_list)
-pki_enrollment_reject = CmdSock(
-    authenticated_cmds.latest.pki_enrollment_reject,
-)
-pki_enrollment_accept = CmdSock(
-    authenticated_cmds.latest.pki_enrollment_accept,
-)
diff --git a/server/tests/backend/conftest.py b/server/tests/backend/conftest.py
deleted file mode 100644
index b3b048e6c87..00000000000
--- a/server/tests/backend/conftest.py
+++ /dev/null
@@ -1,222 +0,0 @@
-# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS
-from __future__ import annotations
-
-from contextlib import asynccontextmanager
-
-import pytest
-
-from parsec._parsec import BackendEventRealmRolesUpdated, DateTime
-from parsec.api.data import RealmRoleCertificate
-from parsec.api.protocol import (
-    AuthenticatedClientHandshake,
-    InvitationToken,
-    InvitationType,
-    InvitedClientHandshake,
-    OrganizationID,
-    RealmRole,
-    VlobID,
-)
-from parsec.backend.realm import RealmGrantedRole
-from tests.common import LocalDevice
-
-
-@pytest.fixture
-def backend_invited_ws_factory():
-    @asynccontextmanager
-    async def _backend_invited_ws_factory(
-        backend_asgi_app,
-        organization_id: OrganizationID,
-        invitation_type: InvitationType,
-        token: InvitationToken,
-    ):
-        client = backend_asgi_app.test_client()
-        async with client.websocket("/ws") as ws:
-            ch = InvitedClientHandshake(
-                organization_id=organization_id, invitation_type=invitation_type, token=token
-            )
-            challenge_req = await ws.receive()
-            answer_req = ch.process_challenge_req(challenge_req)
-            await ws.send(answer_req)
-            result_req = await ws.receive()
-            ch.process_result_req(result_req)
-
-            yield ws
-
-    return _backend_invited_ws_factory
-
-
-@pytest.fixture
-def backend_authenticated_ws_factory():
-    # APIv2's invited handshake is not compatible with this
-    # fixture because it requires purpose information (invitation_type/token)
-    @asynccontextmanager
-    async def _backend_authenticated_ws_factory(backend_asgi_app, auth_as: LocalDevice):
-        client = backend_asgi_app.test_client()
-        async with client.websocket("/ws") as ws:
-            # Handshake
-            ch = AuthenticatedClientHandshake(
-                auth_as.organization_id,
-                auth_as.device_id,
-                auth_as.signing_key,
-                auth_as.root_verify_key,
-            )
-            challenge_req = await ws.receive()
-            answer_req = ch.process_challenge_req(challenge_req)
-            await ws.send(answer_req)
-            result_req = await ws.receive()
-            ch.process_result_req(result_req)
-
-            yield ws
-
-    return _backend_authenticated_ws_factory
-
-
-@pytest.fixture
-async def alice_ws(backend_asgi_app, alice, backend_authenticated_ws_factory):
-    async with backend_authenticated_ws_factory(backend_asgi_app, alice) as ws:
-        yield ws
-
-
-@pytest.fixture
-async def alice2_ws(backend_asgi_app, alice2, backend_authenticated_ws_factory):
-    async with backend_authenticated_ws_factory(backend_asgi_app, alice2) as ws:
-        yield ws
-
-
-@pytest.fixture
-async def bob_ws(backend_asgi_app, bob, backend_authenticated_ws_factory):
-    async with backend_authenticated_ws_factory(backend_asgi_app, bob) as ws:
-        yield ws
-
-
-@pytest.fixture
-async def adam_ws(backend_asgi_app, adam, backend_authenticated_ws_factory):
-    async with backend_authenticated_ws_factory(backend_asgi_app, adam) as ws:
-        yield ws
-
-
-class AnonymousClientFakingWebsocket:
-    def __init__(self, client, organization_id: OrganizationID):
-        self.organization_id = organization_id
-        self.client = client
-        self.last_request_response = None
-
-    async def send(self, msg: bytes):
-        response = await self.client.post(
-            f"/anonymous/{self.organization_id.str}",
-            headers={"Content-Type": "application/msgpack"},
-            data=msg,
-        )
-        assert response.status_code == 200
-        self.last_request_response = await response.get_data()
-
-    async def receive(self) -> bytes:
-        assert self.last_request_response is not None
-        rep = self.last_request_response
-        self.last_request_response = None
-        return rep
-
-
-@pytest.fixture
-def backend_anonymous_ws_factory():
-    """
-    Not really a ws, but we keep the name because it usage is similar than alice_ws&co
-    """
-
-    @asynccontextmanager
-    async def _backend_anonymous_ws_factory(backend_asgi_app, organization_id: OrganizationID):
-        client = backend_asgi_app.test_client()
-        yield AnonymousClientFakingWebsocket(client, organization_id)
-
-    return _backend_anonymous_ws_factory
-
-
-@pytest.fixture
-async def anonymous_backend_ws(backend_asgi_app, backend_anonymous_ws_factory, coolorg):
-    async with backend_anonymous_ws_factory(backend_asgi_app, coolorg.organization_id) as ws:
-        yield ws
-
-
-@pytest.fixture
-def realm_factory(next_timestamp):
-    async def _realm_factory(backend, author, realm_id=None, now=None):
-        realm_id = realm_id or VlobID.new()
-        now = now or next_timestamp()
-        certif = RealmRoleCertificate.build_realm_root_certif(
-            author=author.device_id, timestamp=now, realm_id=realm_id
-        ).dump_and_sign(author.signing_key)
-        with backend.event_bus.listen() as spy:
-            await backend.realm.create(
-                organization_id=author.organization_id,
-                self_granted_role=RealmGrantedRole(
-                    realm_id=realm_id,
-                    user_id=author.user_id,
-                    certificate=certif,
-                    role=RealmRole.OWNER,
-                    granted_by=author.device_id,
-                    granted_on=now,
-                ),
-            )
-            await spy.wait_with_timeout(BackendEventRealmRolesUpdated)
-        return realm_id
-
-    return _realm_factory
-
-
-@pytest.fixture
-async def realm(backend, alice, realm_factory):
-    realm_id = VlobID.from_hex("A0000000000000000000000000000000")
-    return await realm_factory(backend, alice, realm_id, DateTime(2000, 1, 2))
-
-
-@pytest.fixture
-async def vlobs(backend, alice, realm):
-    vlob_ids = (
-        VlobID.from_hex("10000000000000000000000000000000"),
-        VlobID.from_hex("20000000000000000000000000000000"),
-    )
-    await backend.vlob.create(
-        organization_id=alice.organization_id,
-        author=alice.device_id,
-        realm_id=realm,
-        encryption_revision=1,
-        vlob_id=vlob_ids[0],
-        timestamp=DateTime(2000, 1, 2, 1),
-        blob=b"r:A b:1 v:1",
-    )
-    await backend.vlob.update(
-        organization_id=alice.organization_id,
-        author=alice.device_id,
-        encryption_revision=1,
-        vlob_id=vlob_ids[0],
-        version=2,
-        timestamp=DateTime(2000, 1, 3),
-        blob=b"r:A b:1 v:2",
-    )
-    await backend.vlob.create(
-        organization_id=alice.organization_id,
-        author=alice.device_id,
-        realm_id=realm,
-        encryption_revision=1,
-        vlob_id=vlob_ids[1],
-        timestamp=DateTime(2000, 1, 4),
-        blob=b"r:A b:2 v:1",
-    )
-    return vlob_ids
-
-
-@pytest.fixture
-async def vlob_atoms(vlobs):
-    return [(vlobs[0], 1), (vlobs[0], 2), (vlobs[1], 1)]
-
-
-@pytest.fixture
-async def other_realm(backend, alice, realm_factory):
-    realm_id = VlobID.from_hex("B0000000000000000000000000000000")
-    return await realm_factory(backend, alice, realm_id, DateTime(2000, 1, 2))
-
-
-@pytest.fixture
-async def bob_realm(backend, bob, realm_factory):
-    realm_id = VlobID.from_hex("C0000000000000000000000000000000")
-    return await realm_factory(backend, bob, realm_id, DateTime(2000, 1, 2))
diff --git a/server/tests/backend/invite/__init__.py b/server/tests/backend/invite/__init__.py
deleted file mode 100644
index 05e02a3b569..00000000000
--- a/server/tests/backend/invite/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS
diff --git a/server/tests/backend/invite/conftest.py b/server/tests/backend/invite/conftest.py
deleted file mode 100644
index ec1ddf8cf8e..00000000000
--- a/server/tests/backend/invite/conftest.py
+++ /dev/null
@@ -1,281 +0,0 @@
-# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS
-from __future__ import annotations
-
-import pytest
-import trio
-
-from parsec._parsec import (
-    DateTime,
-    HashDigest,
-    PrivateKey,
-)
-from parsec.api.protocol import (
-    Invite1ClaimerWaitPeerRepOk,
-    Invite1GreeterWaitPeerRepOk,
-    Invite2aClaimerSendHashedNonceRepOk,
-    Invite2aGreeterGetHashedNonceRepOk,
-    Invite2bClaimerSendNonceRepOk,
-    Invite2bGreeterSendNonceRepOk,
-    Invite3aClaimerSignifyTrustRepOk,
-    Invite3aGreeterWaitPeerTrustRepOk,
-    Invite3bClaimerWaitPeerTrustRepOk,
-    Invite3bClaimerWaitPeerTrustRepUnknownStatus,
-    Invite3bGreeterSignifyTrustRepOk,
-    Invite4ClaimerCommunicateRepOk,
-    Invite4GreeterCommunicateRepOk,
-    InviteDeleteRepOk,
-    InviteInfoRepOk,
-    InviteListRepOk,
-    InviteNewRepOk,
-)
-from tests.backend.common import (
-    invite_1_claimer_wait_peer,
-    invite_1_greeter_wait_peer,
-    invite_2a_claimer_send_hashed_nonce,
-    invite_2a_greeter_get_hashed_nonce,
-    invite_2b_claimer_send_nonce,
-    invite_2b_greeter_send_nonce,
-    invite_3a_claimer_signify_trust,
-    invite_3a_greeter_wait_peer_trust,
-    invite_3b_claimer_wait_peer_trust,
-    invite_3b_greeter_signify_trust,
-    invite_4_claimer_communicate,
-    invite_4_greeter_communicate,
-    invite_info,
-)
-
-
-class PeerControler:
-    def __init__(self):
-        self._orders_sender, self._orders_receiver = trio.open_memory_channel(0)
-        self._orders_ack_sender, self._orders_ack_receiver = trio.open_memory_channel(0)
-        self._results_sender, self._results_receiver = trio.open_memory_channel(1)
-
-    # Methods used to control the peer
-
-    async def send_order(self, order, step_4_payload=None):
-        assert self._results_receiver.statistics().current_buffer_used == 0
-        await self._orders_sender.send((order, step_4_payload))
-        await self._orders_ack_receiver.receive()
-
-    async def get_result(self):
-        is_exc, ret = await self._results_receiver.receive()
-        if is_exc:
-            raise ret
-        else:
-            return ret
-
-    async def assert_ok_rep(self):
-        rep = await self.get_result()
-        assert isinstance(
-            rep,
-            (
-                Invite1ClaimerWaitPeerRepOk,
-                Invite1GreeterWaitPeerRepOk,
-                Invite2aClaimerSendHashedNonceRepOk,
-                Invite2aGreeterGetHashedNonceRepOk,
-                Invite2bClaimerSendNonceRepOk,
-                Invite2bGreeterSendNonceRepOk,
-                Invite3aClaimerSignifyTrustRepOk,
-                Invite3aGreeterWaitPeerTrustRepOk,
-                Invite3bClaimerWaitPeerTrustRepOk,
-                Invite3bClaimerWaitPeerTrustRepUnknownStatus,
-                Invite3bGreeterSignifyTrustRepOk,
-                Invite4ClaimerCommunicateRepOk,
-                Invite4GreeterCommunicateRepOk,
-                InviteDeleteRepOk,
-                InviteInfoRepOk,
-                InviteListRepOk,
-                InviteNewRepOk,
-            ),
-        )
-
-    # Methods used by the peer
-
-    async def peer_do(self, action, *args, **kwargs):
-        req_done = False
-        try:
-            async with action.async_call(*args, **kwargs) as async_rep:
-                req_done = True
-                await self._orders_ack_sender.send(None)
-
-                # Explicit use of `do_recv` instead of relying on the __aexit__
-                # which contains a `fail_after` timeout (some commands such
-                # as `1_wait_peer` wait for a peer to finish, on which we
-                # have here no control on)
-                await async_rep.do_recv()
-                await self._results_sender.send((False, async_rep.rep))
-
-        except Exception as exc:
-            if not req_done:
-                await self._orders_ack_sender.send(None)
-            await self._results_sender.send((True, exc))
-
-    async def peer_next_order(self):
-        return await self._orders_receiver.receive()
-
-
-class ExchangeTestBed:
-    def __init__(
-        self,
-        organization_id,
-        greeter,
-        invitation,
-        greeter_privkey,
-        claimer_privkey,
-        greeter_ctlr,
-        claimer_ctlr,
-        greeter_ws,
-        claimer_ws,
-    ):
-        self.organization_id = organization_id
-        self.greeter = greeter
-        self.invitation = invitation
-        self.greeter_privkey = greeter_privkey
-        self.claimer_privkey = claimer_privkey
-        self.greeter_ctlr = greeter_ctlr
-        self.claimer_ctlr = claimer_ctlr
-        self.greeter_ws = greeter_ws
-        self.claimer_ws = claimer_ws
-
-    async def send_order(self, who, order, step_4_payload=b""):
-        assert who in ("greeter", "claimer")
-        ctlr = getattr(self, f"{who}_ctlr")
-        await ctlr.send_order(order, step_4_payload=step_4_payload)
-
-    async def get_result(self, who):
-        assert who in ("greeter", "claimer")
-        ctlr = getattr(self, f"{who}_ctlr")
-        return await ctlr.get_result()
-
-    async def assert_ok_rep(self, who):
-        assert who in ("greeter", "claimer")
-        ctlr = getattr(self, f"{who}_ctlr")
-        return await ctlr.assert_ok_rep()
-
-
-@pytest.fixture
-async def exchange_testbed(backend_asgi_app, alice, alice_ws, backend_invited_ws_factory):
-    async def _run_greeter(tb):
-        peer_controller = tb.greeter_ctlr
-        while True:
-            order, step_4_payload = await peer_controller.peer_next_order()
-
-            if order == "1_wait_peer":
-                await peer_controller.peer_do(
-                    invite_1_greeter_wait_peer,
-                    tb.greeter_ws,
-                    token=tb.invitation.token,
-                    greeter_public_key=tb.greeter_privkey.public_key,
-                )
-
-            elif order == "2a_get_hashed_nonce":
-                await peer_controller.peer_do(
-                    invite_2a_greeter_get_hashed_nonce, tb.greeter_ws, token=tb.invitation.token
-                )
-
-            elif order == "2b_send_nonce":
-                await peer_controller.peer_do(
-                    invite_2b_greeter_send_nonce,
-                    tb.greeter_ws,
-                    token=tb.invitation.token,
-                    greeter_nonce=b"",
-                )
-
-            elif order == "3a_wait_peer_trust":
-                await peer_controller.peer_do(
-                    invite_3a_greeter_wait_peer_trust, tb.greeter_ws, token=tb.invitation.token
-                )
-
-            elif order == "3b_signify_trust":
-                await peer_controller.peer_do(
-                    invite_3b_greeter_signify_trust, tb.greeter_ws, token=tb.invitation.token
-                )
-
-            elif order == "4_communicate":
-                assert step_4_payload is not None
-                await peer_controller.peer_do(
-                    invite_4_greeter_communicate,
-                    tb.greeter_ws,
-                    token=tb.invitation.token,
-                    payload=step_4_payload,
-                )
-
-            else:
-                assert False
-
-    async def _run_claimer(tb):
-        peer_controller = tb.claimer_ctlr
-        while True:
-            order, step_4_payload = await peer_controller.peer_next_order()
-
-            if order == "invite_info":
-                await peer_controller.peer_do(invite_info, tb.claimer_ws)
-
-            elif order == "1_wait_peer":
-                await peer_controller.peer_do(
-                    invite_1_claimer_wait_peer,
-                    tb.claimer_ws,
-                    claimer_public_key=tb.claimer_privkey.public_key,
-                )
-
-            elif order == "2a_send_hashed_nonce":
-                await peer_controller.peer_do(
-                    invite_2a_claimer_send_hashed_nonce,
-                    tb.claimer_ws,
-                    claimer_hashed_nonce=HashDigest.from_data(b""),
-                )
-
-            elif order == "2b_send_nonce":
peer_controller.peer_do( - invite_2b_claimer_send_nonce, tb.claimer_ws, claimer_nonce=b"" - ) - - elif order == "3a_signify_trust": - await peer_controller.peer_do(invite_3a_claimer_signify_trust, tb.claimer_ws) - - elif order == "3b_wait_peer_trust": - await peer_controller.peer_do(invite_3b_claimer_wait_peer_trust, tb.claimer_ws) - - elif order == "4_communicate": - assert step_4_payload is not None - await peer_controller.peer_do( - invite_4_claimer_communicate, tb.claimer_ws, payload=step_4_payload - ) - - else: - assert False - - greeter_ctlr = PeerControler() - claimer_ctlr = PeerControler() - greeter_privkey = PrivateKey.generate() - claimer_privkey = PrivateKey.generate() - - invitation = await backend_asgi_app.backend.invite.new_for_device( - organization_id=alice.organization_id, - greeter_user_id=alice.user_id, - created_on=DateTime(2000, 1, 2), - ) - async with backend_invited_ws_factory( - backend_asgi_app, - organization_id=alice.organization_id, - invitation_type=invitation.TYPE, - token=invitation.token, - ) as claimer_ws: - async with trio.open_nursery() as nursery: - tb = ExchangeTestBed( - organization_id=alice.organization_id, - greeter=alice, - invitation=invitation, - greeter_privkey=greeter_privkey, - claimer_privkey=claimer_privkey, - greeter_ctlr=greeter_ctlr, - claimer_ctlr=claimer_ctlr, - greeter_ws=alice_ws, - claimer_ws=claimer_ws, - ) - nursery.start_soon(_run_greeter, tb) - nursery.start_soon(_run_claimer, tb) - yield tb - - nursery.cancel_scope.cancel() diff --git a/server/tests/backend/invite/test_exchange.py b/server/tests/backend/invite/test_exchange.py deleted file mode 100644 index 0c46fc4db08..00000000000 --- a/server/tests/backend/invite/test_exchange.py +++ /dev/null @@ -1,697 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -import pytest -import trio - -from parsec._parsec import ( - BackendEventInviteConduitUpdated, - DateTime, - HashDigest, - PrivateKey, -) -from parsec.api.protocol import ( - InvitationType, - Invite1ClaimerWaitPeerRepOk, - Invite1GreeterWaitPeerRepOk, - Invite2aClaimerSendHashedNonceRepInvalidState, - Invite2aClaimerSendHashedNonceRepOk, - Invite2aGreeterGetHashedNonceRepInvalidState, - Invite2aGreeterGetHashedNonceRepOk, - Invite2bClaimerSendNonceRepOk, - Invite2bGreeterSendNonceRepInvalidState, - Invite2bGreeterSendNonceRepOk, - Invite3aClaimerSignifyTrustRepInvalidState, - Invite3aClaimerSignifyTrustRepOk, - Invite3aGreeterWaitPeerTrustRepInvalidState, - Invite3aGreeterWaitPeerTrustRepOk, - Invite3bClaimerWaitPeerTrustRepInvalidState, - Invite3bClaimerWaitPeerTrustRepOk, - Invite3bClaimerWaitPeerTrustRepUnknownStatus, - Invite3bGreeterSignifyTrustRepInvalidState, - Invite3bGreeterSignifyTrustRepOk, - Invite4ClaimerCommunicateRepInvalidState, - Invite4ClaimerCommunicateRepOk, - Invite4GreeterCommunicateRepInvalidState, - Invite4GreeterCommunicateRepOk, - InviteDeleteRepOk, - InviteInfoRepOk, - InviteListRepOk, - InviteNewRepOk, -) -from tests.backend.common import ( - invite_1_claimer_wait_peer, - invite_1_greeter_wait_peer, - invite_2a_claimer_send_hashed_nonce, - invite_2a_greeter_get_hashed_nonce, - invite_2b_claimer_send_nonce, - invite_2b_greeter_send_nonce, - invite_3a_claimer_signify_trust, - invite_3a_greeter_wait_peer_trust, - invite_3b_claimer_wait_peer_trust, - invite_3b_greeter_signify_trust, - invite_4_claimer_communicate, - invite_4_greeter_communicate, -) -from tests.common import real_clock_timeout - - -@pytest.fixture -async 
def invitation(backend, alice): - invitation = await backend.invite.new_for_device( - organization_id=alice.organization_id, - greeter_user_id=alice.user_id, - created_on=DateTime(2000, 1, 2), - ) - return invitation - - -@pytest.fixture -async def invited_ws(backend_asgi_app, backend_invited_ws_factory, alice, invitation): - async with backend_invited_ws_factory( - backend_asgi_app, - organization_id=alice.organization_id, - invitation_type=InvitationType.DEVICE, - token=invitation.token, - ) as invited_ws: - yield invited_ws - - -class PeerControler: - def __init__(self): - self._orders_sender, self._orders_receiver = trio.open_memory_channel(0) - self._orders_ack_sender, self._orders_ack_receiver = trio.open_memory_channel(0) - self._results_sender, self._results_receiver = trio.open_memory_channel(1) - - async def send_order(self, order, order_arg=None): - assert self._results_receiver.statistics().current_buffer_used == 0 - await self._orders_sender.send((order, order_arg)) - await self._orders_ack_receiver.receive() - - async def get_result(self): - return await self._results_receiver.receive() - - async def assert_ok_rep(self): - rep = await self.get_result() - assert type(rep) in [ - Invite1ClaimerWaitPeerRepOk, - Invite1GreeterWaitPeerRepOk, - Invite2aClaimerSendHashedNonceRepOk, - Invite2aGreeterGetHashedNonceRepOk, - Invite2bClaimerSendNonceRepOk, - Invite2bGreeterSendNonceRepOk, - Invite3aClaimerSignifyTrustRepOk, - Invite3aGreeterWaitPeerTrustRepOk, - Invite3bClaimerWaitPeerTrustRepOk, - Invite3bClaimerWaitPeerTrustRepUnknownStatus, - Invite3bGreeterSignifyTrustRepOk, - Invite4ClaimerCommunicateRepOk, - Invite4GreeterCommunicateRepOk, - InviteDeleteRepOk, - InviteInfoRepOk, - InviteListRepOk, - InviteNewRepOk, - ] - - async def peer_do(self, action, *args, **kwargs): - async with action.async_call(*args, **kwargs) as async_rep: - await self._orders_ack_sender.send(None) - await self._results_sender.send(async_rep.rep) - return True - - async def peer_next_order(self): - return await self._orders_receiver.receive() - - -@pytest.fixture -async def exchange_testbed(alice_ws, invitation, invited_ws): - greeter_privkey = PrivateKey.generate() - claimer_privkey = PrivateKey.generate() - - async def _run_greeter(peer_controller): - while True: - order, order_arg = await peer_controller.peer_next_order() - - if order == "1_wait_peer": - await peer_controller.peer_do( - invite_1_greeter_wait_peer, - alice_ws, - token=invitation.token, - greeter_public_key=greeter_privkey.public_key, - ) - - elif order == "2a_get_hashed_nonce": - await peer_controller.peer_do( - invite_2a_greeter_get_hashed_nonce, alice_ws, token=invitation.token - ) - - elif order == "2b_send_nonce": - await peer_controller.peer_do( - invite_2b_greeter_send_nonce, - alice_ws, - token=invitation.token, - greeter_nonce=b"", - ) - - elif order == "3a_wait_peer_trust": - await peer_controller.peer_do( - invite_3a_greeter_wait_peer_trust, alice_ws, token=invitation.token - ) - - elif order == "3b_signify_trust": - await peer_controller.peer_do( - invite_3b_greeter_signify_trust, alice_ws, token=invitation.token - ) - - elif order == "4_communicate": - await peer_controller.peer_do( - invite_4_greeter_communicate, - alice_ws, - token=invitation.token, - payload=order_arg, - ) - - else: - assert False - - async def _run_claimer(peer_controller): - while True: - order, order_arg = await peer_controller.peer_next_order() - - if order == "1_wait_peer": - await peer_controller.peer_do( - invite_1_claimer_wait_peer, - invited_ws, 
- claimer_public_key=claimer_privkey.public_key, - ) - - elif order == "2a_send_hashed_nonce": - await peer_controller.peer_do( - invite_2a_claimer_send_hashed_nonce, - invited_ws, - claimer_hashed_nonce=HashDigest.from_data(b""), - ) - - elif order == "2b_send_nonce": - await peer_controller.peer_do( - invite_2b_claimer_send_nonce, invited_ws, claimer_nonce=b"" - ) - - elif order == "3a_signify_trust": - await peer_controller.peer_do(invite_3a_claimer_signify_trust, invited_ws) - - elif order == "3b_wait_peer_trust": - await peer_controller.peer_do(invite_3b_claimer_wait_peer_trust, invited_ws) - - elif order == "4_communicate": - assert order_arg is not None - await peer_controller.peer_do( - invite_4_claimer_communicate, invited_ws, payload=order_arg - ) - - else: - assert False - - greeter_ctlr = PeerControler() - claimer_ctlr = PeerControler() - async with trio.open_nursery() as nursery: - nursery.start_soon(_run_greeter, greeter_ctlr) - nursery.start_soon(_run_claimer, claimer_ctlr) - - yield greeter_privkey, claimer_privkey, greeter_ctlr, claimer_ctlr - - nursery.cancel_scope.cancel() - - -@pytest.mark.trio -@pytest.mark.parametrize("leader", ("claimer", "greeter")) -async def test_conduit_exchange_good(exchange_testbed, leader): - greeter_privkey, claimer_privkey, greeter_ctlr, claimer_ctlr = exchange_testbed - - # Step 1 - if leader == "greeter": - await greeter_ctlr.send_order("1_wait_peer") - await claimer_ctlr.send_order("1_wait_peer") - else: - await claimer_ctlr.send_order("1_wait_peer") - await greeter_ctlr.send_order("1_wait_peer") - - greeter_rep = await greeter_ctlr.get_result() - claimer_rep = await claimer_ctlr.get_result() - - assert greeter_rep.claimer_public_key == claimer_privkey.public_key - assert claimer_rep.greeter_public_key == greeter_privkey.public_key - - # Step 2 - if leader == "greeter": - await greeter_ctlr.send_order("2a_get_hashed_nonce") - await claimer_ctlr.send_order("2a_send_hashed_nonce") - else: - await claimer_ctlr.send_order("2a_send_hashed_nonce") - await greeter_ctlr.send_order("2a_get_hashed_nonce") - - greeter_rep = await greeter_ctlr.get_result() - assert greeter_rep.claimer_hashed_nonce == HashDigest.from_data(b"") - await greeter_ctlr.send_order("2b_send_nonce") - - claimer_rep = await claimer_ctlr.get_result() - assert claimer_rep.greeter_nonce == b"" - await claimer_ctlr.send_order("2b_send_nonce") - - greeter_rep = await greeter_ctlr.get_result() - assert greeter_rep.claimer_nonce == b"" - claimer_rep = await claimer_ctlr.get_result() - - # Step 3a - if leader == "greeter": - await greeter_ctlr.send_order("3a_wait_peer_trust") - await claimer_ctlr.send_order("3a_signify_trust") - else: - await claimer_ctlr.send_order("3a_signify_trust") - await greeter_ctlr.send_order("3a_wait_peer_trust") - greeter_rep = await greeter_ctlr.get_result() - assert isinstance(greeter_rep, Invite3aGreeterWaitPeerTrustRepOk) - claimer_rep = await claimer_ctlr.get_result() - assert isinstance(claimer_rep, Invite3aClaimerSignifyTrustRepOk) - - # Step 3b - if leader == "greeter": - await greeter_ctlr.send_order("3b_signify_trust") - await claimer_ctlr.send_order("3b_wait_peer_trust") - else: - await claimer_ctlr.send_order("3b_wait_peer_trust") - await greeter_ctlr.send_order("3b_signify_trust") - greeter_rep = await greeter_ctlr.get_result() - assert isinstance(greeter_rep, Invite3bGreeterSignifyTrustRepOk) - claimer_rep = await claimer_ctlr.get_result() - assert isinstance(claimer_rep, Invite3bClaimerWaitPeerTrustRepOk) - - # Step 4 - if leader == "greeter": 
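- # Step 4 is symmetric: the conduit only progresses once both peers
- # have submitted their payload, so whichever side goes first simply
- # blocks until the other catches up (hence the `leader` parametrization).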
- await greeter_ctlr.send_order("4_communicate", b"hello from greeter") - await claimer_ctlr.send_order("4_communicate", b"hello from claimer") - else: - await claimer_ctlr.send_order("4_communicate", b"hello from claimer") - await greeter_ctlr.send_order("4_communicate", b"hello from greeter") - greeter_rep = await greeter_ctlr.get_result() - assert greeter_rep.payload == b"hello from claimer" - claimer_rep = await claimer_ctlr.get_result() - assert claimer_rep.payload == b"hello from greeter" - - if leader == "greeter": - await greeter_ctlr.send_order("4_communicate", b"") - await claimer_ctlr.send_order("4_communicate", b"") - else: - await claimer_ctlr.send_order("4_communicate", b"") - await greeter_ctlr.send_order("4_communicate", b"") - - greeter_rep = await greeter_ctlr.get_result() - assert greeter_rep.payload == b"" - claimer_rep = await claimer_ctlr.get_result() - assert claimer_rep.payload == b"" - - -@pytest.mark.trio -async def test_conduit_exchange_reset(exchange_testbed): - greeter_privkey, claimer_privkey, greeter_ctlr, claimer_ctlr = exchange_testbed - - # Step 1 - await greeter_ctlr.send_order("1_wait_peer") - await claimer_ctlr.send_order("1_wait_peer") - await greeter_ctlr.assert_ok_rep() - await claimer_ctlr.assert_ok_rep() - - # Claimer reset just before step 2a - for leader in ("claimer", "greeter"): - if leader == "claimer": - await claimer_ctlr.send_order("1_wait_peer") - await greeter_ctlr.send_order("2a_get_hashed_nonce") - else: - await greeter_ctlr.send_order("2a_get_hashed_nonce") - await claimer_ctlr.send_order("1_wait_peer") - greeter_rep = await greeter_ctlr.get_result() - assert isinstance(greeter_rep, Invite2aGreeterGetHashedNonceRepInvalidState) - await greeter_ctlr.send_order("1_wait_peer") - await greeter_ctlr.assert_ok_rep() - await claimer_ctlr.assert_ok_rep() - - # Greeter reset just before step 2a - for leader in ("claimer", "greeter"): - if leader == "claimer": - await claimer_ctlr.send_order("2a_send_hashed_nonce") - await greeter_ctlr.send_order("1_wait_peer") - else: - await greeter_ctlr.send_order("1_wait_peer") - await claimer_ctlr.send_order("2a_send_hashed_nonce") - claimer_rep = await claimer_ctlr.get_result() - assert isinstance(claimer_rep, Invite2aClaimerSendHashedNonceRepInvalidState) - await claimer_ctlr.send_order("1_wait_peer") - await claimer_ctlr.assert_ok_rep() - await greeter_ctlr.assert_ok_rep() - - # Step 2a - await greeter_ctlr.send_order("2a_get_hashed_nonce") - await claimer_ctlr.send_order("2a_send_hashed_nonce") - greeter_rep = await greeter_ctlr.assert_ok_rep() - # Greeter reset after retrieving claimer hashed nonce - await greeter_ctlr.send_order("1_wait_peer") - claimer_rep = await claimer_ctlr.get_result() - assert isinstance(claimer_rep, Invite2aClaimerSendHashedNonceRepInvalidState) - await claimer_ctlr.send_order("1_wait_peer") - await claimer_ctlr.assert_ok_rep() - await greeter_ctlr.assert_ok_rep() - - # Step 2a - await greeter_ctlr.send_order("2a_get_hashed_nonce") - await claimer_ctlr.send_order("2a_send_hashed_nonce") - await greeter_ctlr.assert_ok_rep() - # Step 2b - await greeter_ctlr.send_order("2b_send_nonce") - await claimer_ctlr.assert_ok_rep() - # Claimer reset after retrieving greeter nonce - await claimer_ctlr.send_order("1_wait_peer") - greeter_rep = await greeter_ctlr.get_result() - assert isinstance(greeter_rep, Invite2bGreeterSendNonceRepInvalidState) - await greeter_ctlr.send_order("1_wait_peer") - await greeter_ctlr.assert_ok_rep() - await claimer_ctlr.assert_ok_rep() - - # Step 2a - await 
greeter_ctlr.send_order("2a_get_hashed_nonce") - await claimer_ctlr.send_order("2a_send_hashed_nonce") - await greeter_ctlr.assert_ok_rep() - # Step 2b - await greeter_ctlr.send_order("2b_send_nonce") - await claimer_ctlr.assert_ok_rep() - await claimer_ctlr.send_order("2b_send_nonce") - await claimer_ctlr.assert_ok_rep() - await greeter_ctlr.assert_ok_rep() - # Greeter reset just before step 3a - for leader in ("claimer", "greeter"): - if leader == "claimer": - await claimer_ctlr.send_order("3a_signify_trust") - await greeter_ctlr.send_order("1_wait_peer") - else: - await greeter_ctlr.send_order("1_wait_peer") - await claimer_ctlr.send_order("3a_signify_trust") - claimer_rep = await claimer_ctlr.get_result() - assert isinstance(claimer_rep, Invite3aClaimerSignifyTrustRepInvalidState) - await claimer_ctlr.send_order("1_wait_peer") - await claimer_ctlr.assert_ok_rep() - await greeter_ctlr.assert_ok_rep() - - # Step 2a - await greeter_ctlr.send_order("2a_get_hashed_nonce") - await claimer_ctlr.send_order("2a_send_hashed_nonce") - await greeter_ctlr.assert_ok_rep() - # Step 2b - await greeter_ctlr.send_order("2b_send_nonce") - await claimer_ctlr.assert_ok_rep() - await claimer_ctlr.send_order("2b_send_nonce") - await claimer_ctlr.assert_ok_rep() - await greeter_ctlr.assert_ok_rep() - # Claimer reset just before step 3a - for leader in ("claimer", "greeter"): - if leader == "claimer": - await claimer_ctlr.send_order("1_wait_peer") - await greeter_ctlr.send_order("3a_wait_peer_trust") - else: - await greeter_ctlr.send_order("3a_wait_peer_trust") - await claimer_ctlr.send_order("1_wait_peer") - greeter_rep = await greeter_ctlr.get_result() - assert isinstance(greeter_rep, Invite3aGreeterWaitPeerTrustRepInvalidState) - await greeter_ctlr.send_order("1_wait_peer") - await greeter_ctlr.assert_ok_rep() - await claimer_ctlr.assert_ok_rep() - - # Step 2a - await greeter_ctlr.send_order("2a_get_hashed_nonce") - await claimer_ctlr.send_order("2a_send_hashed_nonce") - await greeter_ctlr.assert_ok_rep() - # Step 2b - await greeter_ctlr.send_order("2b_send_nonce") - await claimer_ctlr.assert_ok_rep() - await claimer_ctlr.send_order("2b_send_nonce") - await claimer_ctlr.assert_ok_rep() - await greeter_ctlr.assert_ok_rep() - # Step 3a - await greeter_ctlr.send_order("3a_wait_peer_trust") - await claimer_ctlr.send_order("3a_signify_trust") - await claimer_ctlr.assert_ok_rep() - await greeter_ctlr.assert_ok_rep() - # Greeter reset just before step 3b - for leader in ("claimer", "greeter"): - if leader == "claimer": - await claimer_ctlr.send_order("3b_wait_peer_trust") - await greeter_ctlr.send_order("1_wait_peer") - else: - await greeter_ctlr.send_order("1_wait_peer") - await claimer_ctlr.send_order("3b_wait_peer_trust") - claimer_rep = await claimer_ctlr.get_result() - assert isinstance(claimer_rep, Invite3bClaimerWaitPeerTrustRepInvalidState) - await claimer_ctlr.send_order("1_wait_peer") - await claimer_ctlr.assert_ok_rep() - await greeter_ctlr.assert_ok_rep() - - # Step 2a - await greeter_ctlr.send_order("2a_get_hashed_nonce") - await claimer_ctlr.send_order("2a_send_hashed_nonce") - await greeter_ctlr.assert_ok_rep() - # Step 2b - await greeter_ctlr.send_order("2b_send_nonce") - await claimer_ctlr.assert_ok_rep() - await claimer_ctlr.send_order("2b_send_nonce") - await claimer_ctlr.assert_ok_rep() - await greeter_ctlr.assert_ok_rep() - # Step 3a - await greeter_ctlr.send_order("3a_wait_peer_trust") - await claimer_ctlr.send_order("3a_signify_trust") - await claimer_ctlr.assert_ok_rep() - await 
greeter_ctlr.assert_ok_rep() - # Claimer reset just before step 3b - for leader in ("claimer", "greeter"): - if leader == "claimer": - await claimer_ctlr.send_order("1_wait_peer") - await greeter_ctlr.send_order("3b_signify_trust") - else: - await greeter_ctlr.send_order("3b_signify_trust") - await claimer_ctlr.send_order("1_wait_peer") - greeter_rep = await greeter_ctlr.get_result() - assert isinstance(greeter_rep, Invite3bGreeterSignifyTrustRepInvalidState) - await greeter_ctlr.send_order("1_wait_peer") - await greeter_ctlr.assert_ok_rep() - await claimer_ctlr.assert_ok_rep() - - # Step 2a - await greeter_ctlr.send_order("2a_get_hashed_nonce") - await claimer_ctlr.send_order("2a_send_hashed_nonce") - await greeter_ctlr.assert_ok_rep() - # Step 2b - await greeter_ctlr.send_order("2b_send_nonce") - await claimer_ctlr.assert_ok_rep() - await claimer_ctlr.send_order("2b_send_nonce") - await claimer_ctlr.assert_ok_rep() - await greeter_ctlr.assert_ok_rep() - # Step 3a - await greeter_ctlr.send_order("3a_wait_peer_trust") - await claimer_ctlr.send_order("3a_signify_trust") - await claimer_ctlr.assert_ok_rep() - await greeter_ctlr.assert_ok_rep() - # Step 3b - await greeter_ctlr.send_order("3b_signify_trust") - await claimer_ctlr.send_order("3b_wait_peer_trust") - await claimer_ctlr.assert_ok_rep() - await greeter_ctlr.assert_ok_rep() - # Greeter reset just before step 4 - for leader in ("claimer", "greeter"): - if leader == "claimer": - await claimer_ctlr.send_order("4_communicate", b"") - await greeter_ctlr.send_order("1_wait_peer") - else: - await greeter_ctlr.send_order("1_wait_peer") - await claimer_ctlr.send_order("4_communicate", b"") - claimer_rep = await claimer_ctlr.get_result() - assert isinstance(claimer_rep, Invite4ClaimerCommunicateRepInvalidState) - await claimer_ctlr.send_order("1_wait_peer") - await claimer_ctlr.assert_ok_rep() - await greeter_ctlr.assert_ok_rep() - - # Step 2a - await greeter_ctlr.send_order("2a_get_hashed_nonce") - await claimer_ctlr.send_order("2a_send_hashed_nonce") - await greeter_ctlr.assert_ok_rep() - # Step 2b - await greeter_ctlr.send_order("2b_send_nonce") - await claimer_ctlr.assert_ok_rep() - await claimer_ctlr.send_order("2b_send_nonce") - await claimer_ctlr.assert_ok_rep() - await greeter_ctlr.assert_ok_rep() - # Step 3a - await greeter_ctlr.send_order("3a_wait_peer_trust") - await claimer_ctlr.send_order("3a_signify_trust") - await claimer_ctlr.assert_ok_rep() - await greeter_ctlr.assert_ok_rep() - # Step 3b - await greeter_ctlr.send_order("3b_signify_trust") - await claimer_ctlr.send_order("3b_wait_peer_trust") - await claimer_ctlr.assert_ok_rep() - await greeter_ctlr.assert_ok_rep() - # Claimer reset just before step 4 - for leader in ("claimer", "greeter"): - if leader == "claimer": - await claimer_ctlr.send_order("1_wait_peer") - await greeter_ctlr.send_order("4_communicate", b"") - else: - await greeter_ctlr.send_order("4_communicate", b"") - await claimer_ctlr.send_order("1_wait_peer") - greeter_rep = await greeter_ctlr.get_result() - assert isinstance(greeter_rep, Invite4GreeterCommunicateRepInvalidState) - await greeter_ctlr.send_order("1_wait_peer") - await greeter_ctlr.assert_ok_rep() - await claimer_ctlr.assert_ok_rep() - - -@pytest.mark.trio -async def test_claimer_step_1_retry( - backend_asgi_app, alice, backend_invited_ws_factory, alice_ws, invitation -): - greeter_privkey = PrivateKey.generate() - claimer_privkey = PrivateKey.generate() - - async with backend_invited_ws_factory( - backend_asgi_app, - 
organization_id=alice.organization_id,
- invitation_type=InvitationType.DEVICE,
- token=invitation.token,
- ) as invited_ws:
- with backend_asgi_app.backend.event_bus.listen() as spy:
- with trio.CancelScope() as cancel_scope:
- async with invite_1_claimer_wait_peer.async_call(
- invited_ws, claimer_public_key=claimer_privkey.public_key
- ):
- await spy.wait_with_timeout(
- BackendEventInviteConduitUpdated(
- organization_id=alice.organization_id,
- token=invitation.token,
- )
- )
- # Here greeter is waiting for claimer, that's the time we choose to close the greeter connection
- cancel_scope.cancel()
-
- # Now retry the first step with a new connection
- async with backend_invited_ws_factory(
- backend_asgi_app,
- organization_id=alice.organization_id,
- invitation_type=InvitationType.DEVICE,
- token=invitation.token,
- ) as invited_ws:
- async with real_clock_timeout():
- with backend_asgi_app.backend.event_bus.listen() as spy:
- async with invite_1_claimer_wait_peer.async_call(
- invited_ws, claimer_public_key=claimer_privkey.public_key
- ) as claimer_async_rep:
- # Must wait for the reset command to update the conduit
- # before starting the greeter command otherwise it will
- # also be reset
- await spy.wait_with_timeout(
- BackendEventInviteConduitUpdated(
- organization_id=alice.organization_id,
- token=invitation.token,
- )
- )
- greeter_rep = await invite_1_greeter_wait_peer(
- alice_ws,
- token=invitation.token,
- greeter_public_key=greeter_privkey.public_key,
- )
-
- assert greeter_rep.claimer_public_key == claimer_privkey.public_key
- assert claimer_async_rep.rep.greeter_public_key == greeter_privkey.public_key
-
-
-@pytest.mark.trio
-async def test_claimer_step_2_retry(
- backend_asgi_app, alice, backend_authenticated_ws_factory, alice_ws, invitation, invited_ws
-):
- greeter_privkey = PrivateKey.generate()
- claimer_privkey = PrivateKey.generate()
- greeter_retry_privkey = PrivateKey.generate()
- claimer_retry_privkey = PrivateKey.generate()
-
- # Step 1
- async with real_clock_timeout():
- async with invite_1_greeter_wait_peer.async_call(
- alice_ws, token=invitation.token, greeter_public_key=greeter_privkey.public_key
- ) as greeter_async_rep:
- claimer_rep = await invite_1_claimer_wait_peer(
- invited_ws, claimer_public_key=claimer_privkey.public_key
- )
- assert claimer_rep.greeter_public_key == greeter_privkey.public_key
- assert greeter_async_rep.rep.claimer_public_key == claimer_privkey.public_key
-
- # Greeter initiates step 2a...
- async with real_clock_timeout():
- with backend_asgi_app.backend.event_bus.listen() as spy:
- async with invite_2a_greeter_get_hashed_nonce.async_call(
- alice_ws, token=invitation.token
- ) as greeter_2a_async_rep:
- await spy.wait_with_timeout(
- BackendEventInviteConduitUpdated(
- organization_id=alice.organization_id, token=invitation.token
- )
- )
-
- # ...but changes its mind and resets from another connection!
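- # (any `invite_1_greeter_wait_peer` call acts as a conduit reset: the
- # exchange goes back to step 1 and the commands still in flight for
- # the previous attempt complete with an `InvalidState` reply)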
- async with backend_authenticated_ws_factory(backend_asgi_app, alice) as alice_ws2:
- async with invite_1_greeter_wait_peer.async_call(
- alice_ws2,
- token=invitation.token,
- greeter_public_key=greeter_retry_privkey.public_key,
- ) as greeter_retry_1_async_rep:
- # First connection should be notified of the reset
- await greeter_2a_async_rep.do_recv()
- assert isinstance(
- greeter_2a_async_rep.rep, Invite2aGreeterGetHashedNonceRepInvalidState
- )
-
- # Claimer now arrives and tries to do step 2a
- rep = await invite_2a_claimer_send_hashed_nonce(
- invited_ws,
- claimer_hashed_nonce=HashDigest.from_data(b""),
- )
-
- assert isinstance(rep, Invite2aClaimerSendHashedNonceRepInvalidState)
-
- # So claimer returns to step 1
- rep = await invite_1_claimer_wait_peer(
- invited_ws, claimer_public_key=claimer_retry_privkey.public_key
- )
- assert rep.greeter_public_key == greeter_retry_privkey.public_key
-
- assert (
- greeter_retry_1_async_rep.rep.claimer_public_key
- == claimer_retry_privkey.public_key
- )
-
- # Finally retry and achieve step 2
-
- async def _greeter_step_2():
- rep = await invite_2a_greeter_get_hashed_nonce(alice_ws, token=invitation.token)
- assert rep.claimer_hashed_nonce == HashDigest.from_data(b"")
- rep = await invite_2b_greeter_send_nonce(
- alice_ws, token=invitation.token, greeter_nonce=b"greeter nonce"
- )
- assert rep.claimer_nonce == b"claimer nonce"
-
- async def _claimer_step_2():
- rep = await invite_2a_claimer_send_hashed_nonce(
- invited_ws, claimer_hashed_nonce=HashDigest.from_data(b"")
- )
- assert rep.greeter_nonce == b"greeter nonce"
- rep = await invite_2b_claimer_send_nonce(invited_ws, claimer_nonce=b"claimer nonce")
- assert isinstance(rep, Invite2bClaimerSendNonceRepOk)
-
- async with real_clock_timeout():
- async with trio.open_nursery() as nursery:
- nursery.start_soon(_greeter_step_2)
- nursery.start_soon(_claimer_step_2)
diff --git a/server/tests/backend/invite/test_exchange_bad_claimer.py b/server/tests/backend/invite/test_exchange_bad_claimer.py
deleted file mode 100644
index 9cb78ee00b9..00000000000
--- a/server/tests/backend/invite/test_exchange_bad_claimer.py
+++ /dev/null
@@ -1,144 +0,0 @@
-# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS
-from __future__ import annotations
-
-import pytest
-import trio
-from quart.testing.connections import WebsocketDisconnectError
-
-from parsec._parsec import BackendEventInviteStatusChanged, DateTime
-from parsec.api.protocol import InvitationDeletedReason
-from tests.common import real_clock_timeout
-
-
-@pytest.mark.trio
-@pytest.mark.parametrize("during_step", ("not_started", "1", "2a", "2b", "3a", "3b", "4"))
-async def test_delete_invitation_while_claimer_connected(exchange_testbed, backend, during_step):
- tb = exchange_testbed
-
- async def _delete_invitation_and_assert_claimer_left(retrieve_previous_result):
- # Delete the invitation, claimer connection should be closed automatically
- with backend.event_bus.listen() as spy:
- await backend.invite.delete(
- organization_id=tb.organization_id,
- greeter=tb.greeter.user_id,
- token=tb.invitation.token,
- on=DateTime(2000, 1, 2),
- reason=InvitationDeletedReason.ROTTEN,
- )
- await spy.wait_with_timeout(BackendEventInviteStatusChanged)
-
- with pytest.raises(WebsocketDisconnectError):
- async with real_clock_timeout():
- if retrieve_previous_result:
- await tb.get_result("claimer")
- # Even if we had to retrieve an existing result, it could have
- # been returned by backend before the invitation delete occurred,
- # hence we must poll with additional requests no matter what.
- # On top of that the claimer connection can take some time to be
- # closed, so we need a polling loop here.
- while True:
- await tb.send_order("claimer", "invite_info")
- rep = await tb.get_result("claimer")
- # Invitation info is cached for the connection at handshake
- # time, hence the command won't take into account the fact
- # that the invitation has been deleted
- assert rep["status"] == "ok"
-
- if during_step == "not_started":
- await _delete_invitation_and_assert_claimer_left(retrieve_previous_result=False)
- return
-
- # Step 1
- await tb.send_order("greeter", "1_wait_peer")
- await tb.send_order("claimer", "1_wait_peer")
- if during_step == "1":
- await _delete_invitation_and_assert_claimer_left(retrieve_previous_result=True)
- return
- await tb.assert_ok_rep("greeter")
- await tb.assert_ok_rep("claimer")
-
- # Step 2
- await tb.send_order("greeter", "2a_get_hashed_nonce")
- await tb.send_order("claimer", "2a_send_hashed_nonce")
- if during_step == "2a":
- await _delete_invitation_and_assert_claimer_left(retrieve_previous_result=True)
- return
-
- await tb.assert_ok_rep("greeter")
- await tb.send_order("greeter", "2b_send_nonce")
-
- await tb.assert_ok_rep("claimer")
- await tb.send_order("claimer", "2b_send_nonce")
- if during_step == "2b":
- await _delete_invitation_and_assert_claimer_left(retrieve_previous_result=True)
- return
-
- await tb.assert_ok_rep("greeter")
- await tb.assert_ok_rep("claimer")
-
- # Step 3a
- await tb.send_order("greeter", "3a_wait_peer_trust")
- await tb.send_order("claimer", "3a_signify_trust")
- if during_step == "3a":
- await _delete_invitation_and_assert_claimer_left(retrieve_previous_result=True)
- return
- await tb.assert_ok_rep("greeter")
- await tb.assert_ok_rep("claimer")
-
- # Step 3b
- await tb.send_order("greeter", "3b_signify_trust")
- await tb.send_order("claimer", "3b_wait_peer_trust")
- if during_step == "3b":
- await _delete_invitation_and_assert_claimer_left(retrieve_previous_result=True)
- return
- await tb.assert_ok_rep("greeter")
- await tb.assert_ok_rep("claimer")
-
- # Step 4
- await tb.send_order("greeter", "4_communicate", b"hello from greeter")
- await tb.send_order("claimer", "4_communicate", b"hello from claimer")
- if during_step == "4":
- await _delete_invitation_and_assert_claimer_left(retrieve_previous_result=True)
- return
- await tb.assert_ok_rep("greeter")
- await tb.assert_ok_rep("claimer")
-
-
-@pytest.mark.trio
-async def test_delete_invitation_then_claimer_action_before_backend_closes_connection(
- exchange_testbed, backend
-):
- tb = exchange_testbed
-
- # Disable the callback responsible for closing the claimer's connection
- # on invitation deletion. This way we can test connection behavior
- # when the automatic closing takes time to be processed.
- backend.event_bus.mute(BackendEventInviteStatusChanged)
-
- await backend.invite.delete(
- organization_id=tb.organization_id,
- greeter=tb.greeter.user_id,
- token=tb.invitation.token,
- on=DateTime(2000, 1, 2),
- reason=InvitationDeletedReason.ROTTEN,
- )
-
- # No need to be in the correct exchange state here, given that checking
- # the invitation status should be the very first thing done
- for action in [
- # `invite_info` uses a cache populated during the connection handshake,
- # so including it here would make this test fail. However this is fine:
- # not touching the db is precisely what makes it a read-only operation.
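- # Unlike `invite_info`, the actions below all hit the db and hence go
- # through the invitation status check, which is what gets the claimer
- # connection closed once the invitation has been deleted: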
- "1_wait_peer", - "2a_send_hashed_nonce", - "2b_send_nonce", - "3a_signify_trust", - "3b_wait_peer_trust", - "4_communicate", - ]: - # In theory we should also watch for `WebsocketDisconnectError`, but - # `quart_trio.testing.connection` implementation seems a bit broken... - with pytest.raises((WebsocketDisconnectError, trio.EndOfChannel)): - async with real_clock_timeout(): - await tb.send_order("claimer", action) - await tb.get_result("claimer") diff --git a/server/tests/backend/invite/test_exchange_bad_greeter.py b/server/tests/backend/invite/test_exchange_bad_greeter.py deleted file mode 100644 index d272f858ffd..00000000000 --- a/server/tests/backend/invite/test_exchange_bad_greeter.py +++ /dev/null @@ -1,110 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -import pytest - -from parsec._parsec import ( - DateTime, - PrivateKey, -) -from parsec.api.protocol import ( - InvitationDeletedReason, - InvitationToken, - Invite1ClaimerWaitPeerRepNotFound, - Invite1GreeterWaitPeerRepAlreadyDeleted, - Invite1GreeterWaitPeerRepNotFound, - Invite2aClaimerSendHashedNonceRepAlreadyDeleted, - Invite2aClaimerSendHashedNonceRepNotFound, - Invite2aGreeterGetHashedNonceRepAlreadyDeleted, - Invite2aGreeterGetHashedNonceRepNotFound, - Invite2bClaimerSendNonceRepNotFound, - Invite2bGreeterSendNonceRepAlreadyDeleted, - Invite2bGreeterSendNonceRepNotFound, - Invite3aClaimerSignifyTrustRepNotFound, - Invite3aGreeterWaitPeerTrustRepAlreadyDeleted, - Invite3aGreeterWaitPeerTrustRepNotFound, - Invite3bClaimerWaitPeerTrustRepNotFound, - Invite3bGreeterSignifyTrustRepAlreadyDeleted, - Invite3bGreeterSignifyTrustRepNotFound, - Invite4ClaimerCommunicateRepNotFound, - Invite4GreeterCommunicateRepAlreadyDeleted, - Invite4GreeterCommunicateRepNotFound, - InviteDeleteRepAlreadyDeleted, - InviteDeleteRepNotFound, -) -from tests.backend.common import ( - invite_1_greeter_wait_peer, - invite_2a_greeter_get_hashed_nonce, - invite_2b_greeter_send_nonce, - invite_3a_greeter_wait_peer_trust, - invite_3b_greeter_signify_trust, - invite_4_greeter_communicate, -) -from tests.common import real_clock_timeout - - -@pytest.mark.trio -@pytest.mark.parametrize("reason", ("deleted_invitation", "unknown_token")) -async def test_greeter_exchange_bad_access(alice, backend, alice_ws, reason): - if reason == "deleted_invitation": - invitation = await backend.invite.new_for_device( - organization_id=alice.organization_id, greeter_user_id=alice.user_id - ) - await backend.invite.delete( - organization_id=alice.organization_id, - greeter=alice.user_id, - token=invitation.token, - on=DateTime(2000, 1, 2), - reason=InvitationDeletedReason.ROTTEN, - ) - token = invitation.token - status = "already_deleted" - else: - assert reason == "unknown_token" - token = InvitationToken.new() - status = "not_found" - - greeter_privkey = PrivateKey.generate() - for command, params in [ - ( - invite_1_greeter_wait_peer, - {"token": token, "greeter_public_key": greeter_privkey.public_key}, - ), - (invite_2a_greeter_get_hashed_nonce, {"token": token}), - (invite_2b_greeter_send_nonce, {"token": token, "greeter_nonce": b""}), - (invite_3a_greeter_wait_peer_trust, {"token": token}), - (invite_3b_greeter_signify_trust, {"token": token}), - (invite_4_greeter_communicate, {"token": token, "payload": b""}), - ]: - async with real_clock_timeout(): - rep = await command(alice_ws, **params) - - if status == "already_deleted": - status_types = ( - Invite1GreeterWaitPeerRepAlreadyDeleted, - 
Invite2aClaimerSendHashedNonceRepAlreadyDeleted, - Invite2aGreeterGetHashedNonceRepAlreadyDeleted, - Invite2bGreeterSendNonceRepAlreadyDeleted, - Invite3aGreeterWaitPeerTrustRepAlreadyDeleted, - Invite3bGreeterSignifyTrustRepAlreadyDeleted, - Invite4GreeterCommunicateRepAlreadyDeleted, - InviteDeleteRepAlreadyDeleted, - ) - else: - status_types = ( - Invite1ClaimerWaitPeerRepNotFound, - Invite1GreeterWaitPeerRepNotFound, - Invite2aClaimerSendHashedNonceRepNotFound, - Invite2aGreeterGetHashedNonceRepNotFound, - Invite2bClaimerSendNonceRepNotFound, - Invite2bGreeterSendNonceRepNotFound, - Invite3aClaimerSignifyTrustRepNotFound, - Invite3aGreeterWaitPeerTrustRepNotFound, - Invite3bClaimerWaitPeerTrustRepNotFound, - Invite3bGreeterSignifyTrustRepNotFound, - Invite4ClaimerCommunicateRepNotFound, - Invite4GreeterCommunicateRepNotFound, - InviteDeleteRepNotFound, - ) - - assert isinstance(rep, status_types) diff --git a/server/tests/backend/invite/test_exchange_good.py b/server/tests/backend/invite/test_exchange_good.py deleted file mode 100644 index 80dfc2c12c9..00000000000 --- a/server/tests/backend/invite/test_exchange_good.py +++ /dev/null @@ -1,348 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -import pytest - -from parsec._parsec import ( - HashDigest, -) -from parsec.api.protocol import ( - Invite1ClaimerWaitPeerRepInvalidState, - Invite1GreeterWaitPeerRepInvalidState, - Invite2aClaimerSendHashedNonceRepInvalidState, - Invite2aGreeterGetHashedNonceRepInvalidState, - Invite2bClaimerSendNonceRepInvalidState, - Invite2bClaimerSendNonceRepOk, - Invite2bGreeterSendNonceRepInvalidState, - Invite3aClaimerSignifyTrustRepInvalidState, - Invite3aClaimerSignifyTrustRepOk, - Invite3aGreeterWaitPeerTrustRepInvalidState, - Invite3aGreeterWaitPeerTrustRepOk, - Invite3bClaimerWaitPeerTrustRepInvalidState, - Invite3bClaimerWaitPeerTrustRepOk, - Invite3bGreeterSignifyTrustRepInvalidState, - Invite3bGreeterSignifyTrustRepOk, - Invite4ClaimerCommunicateRepInvalidState, - Invite4GreeterCommunicateRepInvalidState, -) - -INVALID_STATES_TYPES = ( - Invite1ClaimerWaitPeerRepInvalidState, - Invite1GreeterWaitPeerRepInvalidState, - Invite2aClaimerSendHashedNonceRepInvalidState, - Invite2aGreeterGetHashedNonceRepInvalidState, - Invite2bClaimerSendNonceRepInvalidState, - Invite2bGreeterSendNonceRepInvalidState, - Invite3aClaimerSignifyTrustRepInvalidState, - Invite3aGreeterWaitPeerTrustRepInvalidState, - Invite3bClaimerWaitPeerTrustRepInvalidState, - Invite3bGreeterSignifyTrustRepInvalidState, - Invite4ClaimerCommunicateRepInvalidState, - Invite4GreeterCommunicateRepInvalidState, -) - - -@pytest.mark.trio -@pytest.mark.parametrize("leader", ("claimer", "greeter")) -async def test_conduit_exchange_good(exchange_testbed, leader): - tb = exchange_testbed - - # Step 1 - if leader == "greeter": - await tb.send_order("greeter", "1_wait_peer") - await tb.send_order("claimer", "1_wait_peer") - else: - await tb.send_order("claimer", "1_wait_peer") - await tb.send_order("greeter", "1_wait_peer") - greeter_rep = await tb.get_result("greeter") - claimer_rep = await tb.get_result("claimer") - assert greeter_rep.claimer_public_key == tb.claimer_privkey.public_key - assert claimer_rep.greeter_public_key == tb.greeter_privkey.public_key - - # Step 2 - if leader == "greeter": - await tb.send_order("greeter", "2a_get_hashed_nonce") - await tb.send_order("claimer", "2a_send_hashed_nonce") - else: - await tb.send_order("claimer", "2a_send_hashed_nonce") 
- await tb.send_order("greeter", "2a_get_hashed_nonce") - - greeter_rep = await tb.get_result("greeter") - assert greeter_rep.claimer_hashed_nonce == HashDigest.from_data(b"") - await tb.send_order("greeter", "2b_send_nonce") - - claimer_rep = await tb.get_result("claimer") - assert claimer_rep.greeter_nonce == b"" - await tb.send_order("claimer", "2b_send_nonce") - - greeter_rep = await tb.get_result("greeter") - assert greeter_rep.claimer_nonce == b"" - claimer_rep = await tb.get_result("claimer") - assert isinstance(claimer_rep, Invite2bClaimerSendNonceRepOk) - - # Step 3a - if leader == "greeter": - await tb.send_order("greeter", "3a_wait_peer_trust") - await tb.send_order("claimer", "3a_signify_trust") - else: - await tb.send_order("claimer", "3a_signify_trust") - await tb.send_order("greeter", "3a_wait_peer_trust") - greeter_rep = await tb.get_result("greeter") - assert isinstance(greeter_rep, Invite3aGreeterWaitPeerTrustRepOk) - claimer_rep = await tb.get_result("claimer") - assert isinstance(claimer_rep, Invite3aClaimerSignifyTrustRepOk) - - # Step 3b - if leader == "greeter": - await tb.send_order("greeter", "3b_signify_trust") - await tb.send_order("claimer", "3b_wait_peer_trust") - else: - await tb.send_order("claimer", "3b_wait_peer_trust") - await tb.send_order("greeter", "3b_signify_trust") - greeter_rep = await tb.get_result("greeter") - assert isinstance(greeter_rep, Invite3bGreeterSignifyTrustRepOk) - claimer_rep = await tb.get_result("claimer") - assert isinstance(claimer_rep, Invite3bClaimerWaitPeerTrustRepOk) - - # Step 4 - if leader == "greeter": - await tb.send_order("greeter", "4_communicate", b"hello from greeter") - await tb.send_order("claimer", "4_communicate", b"hello from claimer") - else: - await tb.send_order("claimer", "4_communicate", b"hello from claimer") - await tb.send_order("greeter", "4_communicate", b"hello from greeter") - greeter_rep = await tb.get_result("greeter") - assert greeter_rep.payload == b"hello from claimer" - claimer_rep = await tb.get_result("claimer") - assert claimer_rep.payload == b"hello from greeter" - - if leader == "greeter": - await tb.send_order("greeter", "4_communicate", b"") - await tb.send_order("claimer", "4_communicate", b"") - else: - await tb.send_order("claimer", "4_communicate", b"") - await tb.send_order("greeter", "4_communicate", b"") - greeter_rep = await tb.get_result("greeter") - assert greeter_rep.payload == b"" - claimer_rep = await tb.get_result("claimer") - assert claimer_rep.payload == b"" - - -@pytest.mark.trio -async def test_conduit_exchange_reset(exchange_testbed): - tb = exchange_testbed - - def possibilities(): - for leader in ("claimer", "greeter"): - for reset_step in ("2a", "2b_greeter", "2b_claimer", "3a", "3b", "4", "4'"): - for reset_actor in ("claimer", "greeter"): - print( - f"=== leader={leader}, reset_step={reset_step}, reset_actor={reset_actor} ===" - ) - yield leader, reset_step, reset_actor - - async def _send_twin_orders(leader, greeter_order, claimer_order): - # Greeter and claimer each send an order at the same time - if leader == "greeter": - await tb.send_order("greeter", greeter_order) - await tb.send_order("claimer", claimer_order) - else: - assert leader == "claimer" - await tb.send_order("claimer", claimer_order) - await tb.send_order("greeter", greeter_order) - - async def _reset_during_twin_orders(leader, reset_actor, greeter_order, claimer_order): - # Greeter and claimer were supposed to each send an order at the - # same time, but one of them send a reset instead - - if 
reset_actor == "greeter": - greeter_order = "1_wait_peer" - else: - assert reset_actor == "claimer" - claimer_order = "1_wait_peer" - - if leader == "greeter": - await tb.send_order("greeter", greeter_order) - await tb.send_order("claimer", claimer_order) - else: - assert leader == "claimer" - await tb.send_order("claimer", claimer_order) - await tb.send_order("greeter", greeter_order) - - if reset_actor == "greeter": - claimer_rep = await tb.get_result("claimer") - assert isinstance(claimer_rep, INVALID_STATES_TYPES) - await tb.send_order("claimer", "1_wait_peer") - else: - assert reset_actor == "claimer" - greeter_rep = await tb.get_result("greeter") - assert isinstance(greeter_rep, INVALID_STATES_TYPES) - await tb.send_order("greeter", "1_wait_peer") - - await tb.assert_ok_rep("greeter") - await tb.assert_ok_rep("claimer") - - async def _reset_during_peer_order_alone(reset_actor): - await tb.send_order(reset_actor, "1_wait_peer") - if reset_actor == "greeter": - claimer_rep = await tb.get_result("claimer") - assert isinstance(claimer_rep, INVALID_STATES_TYPES) - await tb.send_order("claimer", "1_wait_peer") - else: - assert reset_actor == "claimer" - greeter_rep = await tb.get_result("greeter") - assert isinstance(greeter_rep, INVALID_STATES_TYPES) - await tb.send_order("greeter", "1_wait_peer") - - await tb.assert_ok_rep("greeter") - await tb.assert_ok_rep("claimer") - - # Step 1 - await tb.send_order("greeter", "1_wait_peer") - await tb.send_order("claimer", "1_wait_peer") - await tb.assert_ok_rep("greeter") - await tb.assert_ok_rep("claimer") - - for leader, reset_step, reset_actor in possibilities(): - # Step 2a - if reset_step == "2a": - await _reset_during_twin_orders( - leader, - reset_actor, - greeter_order="2a_get_hashed_nonce", - claimer_order="2a_send_hashed_nonce", - ) - continue - else: - await _send_twin_orders( - leader, greeter_order="2a_get_hashed_nonce", claimer_order="2a_send_hashed_nonce" - ) - await tb.assert_ok_rep("greeter") - - # Step 2b - if reset_step == "2b_greeter": - await _reset_during_peer_order_alone("greeter") - continue - else: - await tb.send_order("greeter", "2b_send_nonce") - await tb.assert_ok_rep("claimer") - if reset_step == "2b_claimer": - await _reset_during_peer_order_alone("claimer") - continue - else: - await tb.send_order("claimer", "2b_send_nonce") - await tb.assert_ok_rep("claimer") - await tb.assert_ok_rep("greeter") - - # Step 3a - if reset_step == "3a": - await _reset_during_twin_orders( - leader, - reset_actor, - greeter_order="3a_wait_peer_trust", - claimer_order="3a_signify_trust", - ) - continue - else: - await _send_twin_orders( - leader, greeter_order="3a_wait_peer_trust", claimer_order="3a_signify_trust" - ) - await tb.assert_ok_rep("claimer") - await tb.assert_ok_rep("greeter") - - # Step 3b - if reset_step == "3b": - await _reset_during_twin_orders( - leader, - reset_actor, - greeter_order="3b_signify_trust", - claimer_order="3b_wait_peer_trust", - ) - continue - else: - await _send_twin_orders( - leader, greeter_order="3b_signify_trust", claimer_order="3b_wait_peer_trust" - ) - await tb.assert_ok_rep("claimer") - await tb.assert_ok_rep("greeter") - - # Claimer reset while starting step 4 - if reset_step == "4": - await _reset_during_twin_orders( - leader, reset_actor, greeter_order="4_communicate", claimer_order="4_communicate" - ) - continue - else: - await _send_twin_orders( - leader, greeter_order="4_communicate", claimer_order="4_communicate" - ) - await tb.assert_ok_rep("claimer") - await tb.assert_ok_rep("greeter") - - # 
Step 4 can be run an arbitrary number of times
- if reset_step == "4'":
- await _reset_during_twin_orders(
- leader, reset_actor, greeter_order="4_communicate", claimer_order="4_communicate"
- )
- continue
- else:
- # No reset occurred at all... `reset_step` must be wrong!
- assert False, reset_step
-
-
-@pytest.mark.trio
-async def test_change_connection_during_exchange(
- backend_asgi_app, backend_invited_ws_factory, backend_authenticated_ws_factory, exchange_testbed
-):
- tb = exchange_testbed
-
- # Step 1
- await tb.send_order("greeter", "1_wait_peer")
- await tb.send_order("claimer", "1_wait_peer")
- await tb.assert_ok_rep("greeter")
- await tb.assert_ok_rep("claimer")
-
- # Step 2
- await tb.send_order("greeter", "2a_get_hashed_nonce")
-
- # Change claimer sock, don't close previous sock
- async with backend_invited_ws_factory(
- backend_asgi_app,
- organization_id=tb.organization_id,
- invitation_type=tb.invitation.TYPE,
- token=tb.invitation.token,
- ) as claimer_ws:
- tb.claimer_ws = claimer_ws
- await tb.send_order("claimer", "2a_send_hashed_nonce")
-
- await tb.assert_ok_rep("greeter")
-
- # Change greeter sock, don't close previous sock
- async with backend_authenticated_ws_factory(backend_asgi_app, tb.greeter) as greeter_ws:
- tb.greeter_ws = greeter_ws
- await tb.send_order("greeter", "2b_send_nonce")
-
- await tb.assert_ok_rep("claimer")
- await tb.send_order("claimer", "2b_send_nonce")
-
- await tb.assert_ok_rep("greeter")
- await tb.assert_ok_rep("claimer")
-
- # Close previously used claimer & greeter socks and continue with new ones
- # Step 3a
- async with backend_invited_ws_factory(
- backend_asgi_app,
- organization_id=tb.organization_id,
- invitation_type=tb.invitation.TYPE,
- token=tb.invitation.token,
- ) as claimer_ws:
- tb.claimer_ws = claimer_ws
- async with backend_authenticated_ws_factory(backend_asgi_app, tb.greeter) as greeter_ws:
- tb.greeter_ws = greeter_ws
-
- await tb.send_order("greeter", "3a_wait_peer_trust")
- await tb.send_order("claimer", "3a_signify_trust")
- await tb.assert_ok_rep("claimer")
- await tb.assert_ok_rep("greeter")
-
- # Don't need to finish the exchange
diff --git a/server/tests/backend/invite/test_invite_status_changed_event.py b/server/tests/backend/invite/test_invite_status_changed_event.py
deleted file mode 100644
index acc5dbf6884..00000000000
--- a/server/tests/backend/invite/test_invite_status_changed_event.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS
-from __future__ import annotations
-
-import pytest
-
-from parsec._parsec import (
- DateTime,
- authenticated_cmds,
-)
-from parsec.api.protocol import (
- InvitationStatus,
- InvitationType,
- InviteListItemDevice,
- InviteListRepOk,
-)
-from tests.backend.common import (
- events_listen,
- invite_list,
-)
-from tests.common import real_clock_timeout
-
-
-@pytest.mark.trio
-async def test_greeter_event_on_claimer_join_and_leave(
- alice, backend_asgi_app, alice_ws, backend_invited_ws_factory
-):
- invitation = await backend_asgi_app.backend.invite.new_for_device(
- organization_id=alice.organization_id,
- greeter_user_id=alice.user_id,
- created_on=DateTime(2000, 1, 2),
- )
-
- async with events_listen(alice_ws) as alice_events_listener:
- async with backend_invited_ws_factory(
- backend_asgi_app,
- organization_id=alice.organization_id,
- invitation_type=InvitationType.DEVICE,
- token=invitation.token,
- ):
- # Claimer is ready, this should be notified to the greeter
-
- async with real_clock_timeout():
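- # The greeter should now receive an `InviteStatusChanged` event
- # flipping the invitation to READY (a stray IDLE event may arrive
- # first, which is handled just below)
- 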
rep = await alice_events_listener.do_recv() - # PostgreSQL event dispatching might be lagging behind and return - # the IDLE event first - if rep.unit.invitation_status == InvitationStatus.IDLE: - rep = await alice_events_listener.do_recv() - assert rep == authenticated_cmds.latest.events_listen.RepOk( - authenticated_cmds.latest.events_listen.APIEventInviteStatusChanged( - invitation.token, InvitationStatus.READY - ) - ) - - rep = await invite_list(alice_ws) - assert rep == InviteListRepOk( - [ - InviteListItemDevice( - invitation.token, DateTime(2000, 1, 2), InvitationStatus.READY - ) - ] - ) - - # Now claimer has left, greeter should be again notified - async with real_clock_timeout(): - rep = await alice_events_listener.do_recv() - - assert rep == authenticated_cmds.latest.events_listen.RepOk( - authenticated_cmds.latest.events_listen.APIEventInviteStatusChanged( - invitation.token, InvitationStatus.IDLE - ) - ) - - rep = await invite_list(alice_ws) - assert rep == InviteListRepOk( - [InviteListItemDevice(invitation.token, DateTime(2000, 1, 2), InvitationStatus.IDLE)] - ) diff --git a/server/tests/backend/invite/test_manage_invitations.py b/server/tests/backend/invite/test_manage_invitations.py deleted file mode 100644 index 8db90848b83..00000000000 --- a/server/tests/backend/invite/test_manage_invitations.py +++ /dev/null @@ -1,566 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -from unittest.mock import ANY - -import pytest - -from parsec._parsec import ( - BackendEventInviteStatusChanged, - DateTime, - InvitationType, -) -from parsec.api.protocol import ( - HandshakeBadIdentity, - InvitationDeletedReason, - InvitationEmailSentStatus, - InvitationStatus, - InviteDeleteRepAlreadyDeleted, - InviteDeleteRepNotFound, - InviteDeleteRepOk, - InviteInfoRepOk, - InviteListItemDevice, - InviteListItemUser, - InviteListRepOk, - InviteNewRepAlreadyMember, - InviteNewRepNotAllowed, - InviteNewRepNotAvailable, - InviteNewRepOk, - UserOrDeviceDevice, - UserOrDeviceUser, - UserProfile, -) -from tests.backend.common import ( - invite_delete, - invite_info, - invite_list, - invite_new, -) -from tests.common import customize_fixtures, freeze_time, real_clock_timeout - - -@pytest.mark.trio -async def test_user_new_invitation_and_info( - backend_asgi_app, alice, alice_ws, alice2_ws, backend_invited_ws_factory -): - # Provide other unrelated invitations that should stay unchanged - with backend_asgi_app.backend.event_bus.listen() as spy: - other_device_invitation = await backend_asgi_app.backend.invite.new_for_device( - organization_id=alice.organization_id, - greeter_user_id=alice.user_id, - created_on=DateTime(2000, 1, 2), - ) - other_user_invitation = await backend_asgi_app.backend.invite.new_for_user( - organization_id=alice.organization_id, - greeter_user_id=alice.user_id, - claimer_email="other@example.com", - created_on=DateTime(2000, 1, 3), - ) - await spy.wait_multiple_with_timeout( - [BackendEventInviteStatusChanged, BackendEventInviteStatusChanged] - ) - - await apiv2v3_events_subscribe(alice2_ws) - - with freeze_time("2000-01-04"): - rep = await invite_new(alice_ws, type=InvitationType.USER, claimer_email="zack@example.com") - - assert isinstance(rep, InviteNewRepOk) - token = rep.token - - async with real_clock_timeout(): - rep = await apiv2v3_events_listen_wait(alice2_ws) - assert rep == ApiV2V3_EventsListenRepOk( - ApiV2V3_APIEventInviteStatusChanged(token, InvitationStatus.IDLE) - ) - - rep = await 
invite_list(alice_ws) - - assert rep == InviteListRepOk( - [ - InviteListItemDevice( - other_device_invitation.token, DateTime(2000, 1, 2), InvitationStatus.IDLE - ), - InviteListItemUser( - other_user_invitation.token, - DateTime(2000, 1, 3), - "other@example.com", - InvitationStatus.IDLE, - ), - InviteListItemUser( - token, DateTime(2000, 1, 4), "zack@example.com", InvitationStatus.IDLE - ), - ] - ) - - async with backend_invited_ws_factory( - backend_asgi_app, - organization_id=alice.organization_id, - invitation_type=InvitationType.USER, - token=token, - ) as invited_ws: - rep = await invite_info(invited_ws) - assert rep == InviteInfoRepOk( - UserOrDeviceUser("zack@example.com", alice.user_id, alice.human_handle) - ) - - -@pytest.mark.trio -async def test_device_new_invitation_and_info( - backend_asgi_app, alice, alice_ws, alice2_ws, backend_invited_ws_factory -): - # Provide other unrelated invitations that should stay unchanged - with backend_asgi_app.backend.event_bus.listen() as spy: - other_user_invitation = await backend_asgi_app.backend.invite.new_for_user( - organization_id=alice.organization_id, - greeter_user_id=alice.user_id, - claimer_email="other@example.com", - created_on=DateTime(2000, 1, 2), - ) - await spy.wait_multiple_with_timeout([BackendEventInviteStatusChanged]) - - await apiv2v3_events_subscribe(alice2_ws) - - with freeze_time("2000-01-03"): - rep = await invite_new(alice_ws, type=InvitationType.DEVICE) - assert isinstance(rep, InviteNewRepOk) - token = rep.token - - async with real_clock_timeout(): - rep = await apiv2v3_events_listen_wait(alice2_ws) - assert rep == ApiV2V3_EventsListenRepOk( - ApiV2V3_APIEventInviteStatusChanged(token, InvitationStatus.IDLE) - ) - - rep = await invite_list(alice_ws) - assert rep == InviteListRepOk( - [ - InviteListItemUser( - other_user_invitation.token, - DateTime(2000, 1, 2), - "other@example.com", - InvitationStatus.IDLE, - ), - InviteListItemDevice(token, DateTime(2000, 1, 3), InvitationStatus.IDLE), - ] - ) - - async with backend_invited_ws_factory( - backend_asgi_app, - organization_id=alice.organization_id, - invitation_type=InvitationType.DEVICE, - token=token, - ) as invited_ws: - rep = await invite_info(invited_ws) - assert rep == InviteInfoRepOk(UserOrDeviceDevice(alice.user_id, alice.human_handle)) - - -@pytest.mark.trio -async def test_invite_with_send_mail(alice, alice_ws, email_letterbox): - base_url = ( - "https" if alice.organization_addr.use_ssl else "http" - ) + f"://127.0.0.1:{alice.organization_addr.port}" - - # User invitation - rep = await invite_new( - alice_ws, type=InvitationType.USER, claimer_email="zack@example.com", send_email=True - ) - - assert isinstance(rep, InviteNewRepOk) - assert rep.email_sent == InvitationEmailSentStatus.SUCCESS - token = rep.token - email = await email_letterbox.get_next_with_timeout() - assert email == ("zack@example.com", ANY) - - # Lame checks on the sent email - body = str(email[1]) - assert body.startswith("Content-Type: multipart/alternative;") - assert 'Content-Type: text/plain; charset="us-ascii"' in body - assert 'Content-Type: text/html; charset="us-ascii"' in body - assert "Subject: [Parsec] Alicey McAliceFace invited you to CoolOrg" in body - assert "From: Parsec " in body - assert "To: zack@example.com" in body - assert "Reply-To: =?utf-8?q?Alicey_McAliceFace?= " in body - assert token.hex in body - assert ( - "You have received an invitation from Alicey McAliceFace to join the CoolOrg organization on Parsec." 
- in body - ) - # Check urls in the email - assert ( - f'Claim invitation' - in body - ) - assert ( - f'Parsec Logo' - in body - ) - - # Device invitation - rep = await invite_new(alice_ws, type=InvitationType.DEVICE, send_email=True) - assert isinstance(rep, InviteNewRepOk) - assert rep.email_sent == InvitationEmailSentStatus.SUCCESS - token = rep.token - email = await email_letterbox.get_next_with_timeout() - assert email == (alice.human_handle.email, ANY) - - # Lame checks on the sent email - body = str(email[1]) - assert body.startswith("Content-Type: multipart/alternative;") - assert 'Content-Type: text/plain; charset="us-ascii"' in body - assert 'Content-Type: text/html; charset="us-ascii"' in body - assert "Subject: [Parsec] New device invitation to CoolOrg" in body - assert "From: Parsec " in body - assert "To: alice@example.com" in body - assert "Reply-To: " not in body - assert token.hex in body - assert ( - "You have received an invitation to add a new device to the CoolOrg organization on Parsec." - in body - ) - # Check urls in the email - assert ( - f'Claim invitation' - in body - ) - assert ( - f'Parsec Logo' - in body - ) - - -@pytest.mark.trio -async def test_invite_with_mail_error(alice, alice_ws, monkeypatch): - async def _mocked_send_email(email_config, to_addr, message): - from parsec.backend.invite import InvitationEmailConfigError - - raise InvitationEmailConfigError(Exception()) - - monkeypatch.setattr("parsec.backend.invite.send_email", _mocked_send_email) - - # User invitation - rep = await invite_new( - alice_ws, type=InvitationType.USER, claimer_email="zack@example.com", send_email=True - ) - - assert isinstance(rep, InviteNewRepOk) - assert rep.email_sent == InvitationEmailSentStatus.NOT_AVAILABLE - - # Device invitation - rep = await invite_new(alice_ws, type=InvitationType.DEVICE, send_email=True) - assert isinstance(rep, InviteNewRepOk) - assert rep.email_sent == InvitationEmailSentStatus.NOT_AVAILABLE - - async def _mocked_send_email(email_config, to_addr, message): - from parsec.backend.invite import InvitationEmailRecipientError - - raise InvitationEmailRecipientError(Exception()) - - monkeypatch.setattr("parsec.backend.invite.send_email", _mocked_send_email) - - # User invitation - rep = await invite_new( - alice_ws, type=InvitationType.USER, claimer_email="zack@example.com", send_email=True - ) - - assert isinstance(rep, InviteNewRepOk) - assert rep.email_sent == InvitationEmailSentStatus.BAD_RECIPIENT - - # Device invitation - rep = await invite_new(alice_ws, type=InvitationType.DEVICE, send_email=True) - - assert isinstance(rep, InviteNewRepOk) - assert rep.email_sent == InvitationEmailSentStatus.BAD_RECIPIENT - - -@pytest.mark.trio -@customize_fixtures(alice_has_human_handle=False) -async def test_invite_with_send_mail_and_greeter_without_human_handle( - alice, alice_ws, email_letterbox -): - # User invitation - rep = await invite_new( - alice_ws, type=InvitationType.USER, claimer_email="zack@example.com", send_email=True - ) - - assert isinstance(rep, InviteNewRepOk) - assert rep.email_sent == InvitationEmailSentStatus.SUCCESS - token = rep.token - email = await email_letterbox.get_next_with_timeout() - assert email == ("zack@example.com", ANY) - - # Lame checks on the sent email - body = str(email[1]) - assert body.startswith("Content-Type: multipart/alternative;") - assert 'Content-Type: text/plain; charset="us-ascii"' in body - assert 'Content-Type: text/html; charset="us-ascii"' in body - assert "Subject: [Parsec] alice invited you to CoolOrg" 
in body - assert "From: Parsec " in body - assert "To: zack@example.com" in body - assert "Reply-To: " not in body - assert token.hex in body - - # Device invitation (not available: no human_handle means no email address to send to) - rep = await invite_new(alice_ws, type=InvitationType.DEVICE, send_email=True) - assert isinstance(rep, InviteNewRepNotAvailable) - - -@pytest.mark.trio -@customize_fixtures(alice_profile=UserProfile.OUTSIDER) -async def test_invite_new_limited_for_outsider(alice_ws): - rep = await invite_new(alice_ws, type=InvitationType.DEVICE) - assert isinstance(rep, InviteNewRepOk) - - # Only ADMIN can invite new users - rep = await invite_new(alice_ws, type=InvitationType.USER, claimer_email="zack@example.com") - assert isinstance(rep, InviteNewRepNotAllowed) - - -@pytest.mark.trio -@customize_fixtures(alice_profile=UserProfile.STANDARD) -async def test_invite_new_limited_for_standard(alice_ws): - # Standard users can only invite new devices - rep = await invite_new(alice_ws, type=InvitationType.DEVICE) - assert isinstance(rep, InviteNewRepOk) - - # Only ADMIN can invite new users - rep = await invite_new(alice_ws, type=InvitationType.USER, claimer_email="zack@example.com") - assert isinstance(rep, InviteNewRepNotAllowed) - - -@pytest.mark.trio -async def test_delete_invitation( - alice, backend_asgi_app, alice_ws, alice2_ws, backend_invited_ws_factory -): - with backend_asgi_app.backend.event_bus.listen() as spy: - invitation = await backend_asgi_app.backend.invite.new_for_device( - organization_id=alice.organization_id, - greeter_user_id=alice.user_id, - created_on=DateTime(2000, 1, 2), - ) - await spy.wait_multiple_with_timeout([BackendEventInviteStatusChanged]) - - await apiv2v3_events_subscribe(alice2_ws) - - with backend_asgi_app.backend.event_bus.listen() as spy: - with freeze_time("2000-01-03"): - rep = await invite_delete( - alice_ws, token=invitation.token, reason=InvitationDeletedReason.CANCELLED - ) - - assert isinstance(rep, InviteDeleteRepOk) - await spy.wait_with_timeout(BackendEventInviteStatusChanged) - - async with real_clock_timeout(): - rep = await apiv2v3_events_listen_wait(alice2_ws) - assert rep == ApiV2V3_EventsListenRepOk( - ApiV2V3_APIEventInviteStatusChanged(invitation.token, InvitationStatus.DELETED) - ) - - # Deleted invitations are no longer visible - rep = await invite_list(alice_ws) - assert isinstance(rep, InviteListRepOk) - - # Can no longer use this invitation to connect to the backend - with pytest.raises(HandshakeBadIdentity): - async with backend_invited_ws_factory( - backend_asgi_app, - organization_id=alice.organization_id, - invitation_type=InvitationType.DEVICE, - token=invitation.token, - ): - pass - - -@pytest.mark.trio -@pytest.mark.parametrize("is_revoked", [True, False]) -async def test_new_user_invitation_on_already_member( - backend_data_binder, alice, bob, alice_ws, is_revoked -): - if is_revoked: - await backend_data_binder.bind_revocation(user_id=bob.user_id, certifier=alice) - - rep = await invite_new(alice_ws, type=InvitationType.USER, claimer_email=bob.human_handle.email) - if not is_revoked: - assert isinstance(rep, InviteNewRepAlreadyMember) - else: - assert isinstance(rep, InviteNewRepOk) - -
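The idempotency tests that follow pin down a simple rule: as long as an equivalent invitation exists and has not been deleted, `invite_new` returns the existing token rather than minting a new one; deleting the invitation is what frees the way for a fresh token. A minimal in-memory sketch of that rule, with `Invitation` and `InviteStore` as illustrative stand-ins rather than the backend's real types:

```python
# Illustrative sketch only: the real backend keys invitations per
# organization and stores them in PostgreSQL.
from dataclasses import dataclass, field
from uuid import UUID, uuid4


@dataclass
class Invitation:
    token: UUID
    claimer_email: str | None  # None would model a device invitation
    deleted: bool = False


@dataclass
class InviteStore:
    invitations: list[Invitation] = field(default_factory=list)

    def new_for_user(self, claimer_email: str) -> Invitation:
        # Idempotent: hand back the existing non-deleted invitation if any
        for inv in self.invitations:
            if inv.claimer_email == claimer_email and not inv.deleted:
                return inv
        inv = Invitation(token=uuid4(), claimer_email=claimer_email)
        self.invitations.append(inv)
        return inv


store = InviteStore()
first = store.new_for_user("zack@example.com")
assert store.new_for_user("zack@example.com").token == first.token  # token reused
first.deleted = True  # like invite_delete(..., reason=FINISHED)
assert store.new_for_user("zack@example.com").token != first.token  # fresh token
```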
-@pytest.mark.trio -async def test_idempotent_new_user_invitation(alice, backend, alice_ws): - claimer_email = "zack@example.com" - - invitation = await backend.invite.new_for_user( - organization_id=alice.organization_id, - claimer_email=claimer_email, - greeter_user_id=alice.user_id, - created_on=DateTime(2000, 1, 2), - ) - - # Calling invite_new should be idempotent - with freeze_time("2000-01-03"): - rep = await invite_new(alice_ws, type=InvitationType.USER, claimer_email=claimer_email) - assert isinstance(rep, InviteNewRepOk) - assert rep.token == invitation.token - - rep = await invite_new(alice_ws, type=InvitationType.USER, claimer_email=claimer_email) - assert isinstance(rep, InviteNewRepOk) - assert rep.token == invitation.token - - rep = await invite_list(alice_ws) - assert rep == InviteListRepOk( - [ - InviteListItemUser( - invitation.token, DateTime(2000, 1, 2), claimer_email, InvitationStatus.IDLE - ) - ] - ) - - -@pytest.mark.trio -async def test_idempotent_new_device_invitation(alice, backend, alice_ws): - invitation = await backend.invite.new_for_device( - organization_id=alice.organization_id, - greeter_user_id=alice.user_id, - created_on=DateTime(2000, 1, 2), - ) - - # Calling invite_new should be idempotent - with freeze_time("2000-01-03"): - rep = await invite_new(alice_ws, type=InvitationType.DEVICE) - assert isinstance(rep, InviteNewRepOk) - assert rep.token == invitation.token - - rep = await invite_new(alice_ws, type=InvitationType.DEVICE) - assert isinstance(rep, InviteNewRepOk) - assert rep.token == invitation.token - - rep = await invite_list(alice_ws) - assert rep == InviteListRepOk( - [InviteListItemDevice(invitation.token, DateTime(2000, 1, 2), InvitationStatus.IDLE)] - ) - - -@pytest.mark.trio -async def test_new_user_invitation_after_invitation_deleted(alice, backend, alice_ws): - claimer_email = "zack@example.com" - invitation = await backend.invite.new_for_user( - organization_id=alice.organization_id, - claimer_email=claimer_email, - greeter_user_id=alice.user_id, - created_on=DateTime(2000, 1, 2), - ) - await backend.invite.delete( - organization_id=alice.organization_id, - greeter=invitation.greeter_user_id, - token=invitation.token, - on=DateTime(2000, 1, 3), - reason=InvitationDeletedReason.FINISHED, - ) - - # A deleted invitation shouldn't prevent creating a new one - - with freeze_time("2000-01-04"): - rep = await invite_new(alice_ws, type=InvitationType.USER, claimer_email=claimer_email) - assert isinstance(rep, InviteNewRepOk) - new_token = rep.token - assert new_token != invitation.token - - rep = await invite_list(alice_ws) - assert rep == InviteListRepOk( - [InviteListItemUser(new_token, DateTime(2000, 1, 4), claimer_email, InvitationStatus.IDLE)] - ) - - -@pytest.mark.trio -async def test_new_device_invitation_after_invitation_deleted(alice, backend, alice_ws): - invitation = await backend.invite.new_for_device( - organization_id=alice.organization_id, - greeter_user_id=alice.user_id, - created_on=DateTime(2000, 1, 2), - ) - await backend.invite.delete( - organization_id=alice.organization_id, - greeter=invitation.greeter_user_id, - token=invitation.token, - on=DateTime(2000, 1, 3), - reason=InvitationDeletedReason.FINISHED, - ) - - # A deleted invitation shouldn't prevent creating a new one - - with freeze_time("2000-01-04"): - rep = await invite_new(alice_ws, type=InvitationType.DEVICE) - assert isinstance(rep, InviteNewRepOk) - new_token = rep.token - assert new_token != invitation.token - - rep = await invite_list(alice_ws) - assert rep == InviteListRepOk( - [InviteListItemDevice(new_token, DateTime(2000, 1, 4), InvitationStatus.IDLE)] - ) - - -@pytest.mark.trio -async def test_delete_already_deleted_invitation(alice, backend, alice_ws): - invitation = await backend.invite.new_for_device( - organization_id=alice.organization_id, greeter_user_id=alice.user_id - ) - -
await backend.invite.delete( - organization_id=alice.organization_id, - greeter=alice.user_id, - token=invitation.token, - on=DateTime(2000, 1, 2), - reason=InvitationDeletedReason.ROTTEN, - ) - - rep = await invite_delete( - alice_ws, token=invitation.token, reason=InvitationDeletedReason.CANCELLED - ) - - assert isinstance(rep, InviteDeleteRepAlreadyDeleted) - - -@pytest.mark.trio -async def test_invitation_deletion_isolated_between_users(bob, backend, alice_ws): - invitation = await backend.invite.new_for_device( - organization_id=bob.organization_id, greeter_user_id=bob.user_id - ) - - rep = await invite_list(alice_ws) - assert isinstance(rep, InviteListRepOk) - - rep = await invite_delete( - alice_ws, token=invitation.token, reason=InvitationDeletedReason.CANCELLED - ) - - assert isinstance(rep, InviteDeleteRepNotFound) - - -@pytest.mark.trio -async def test_invitation_deletion_isolated_between_organizations( - alice, other_alice, backend_asgi_app, backend_invited_ws_factory, alice_ws -): - invitation = await backend_asgi_app.backend.invite.new_for_device( - organization_id=other_alice.organization_id, greeter_user_id=other_alice.user_id - ) - - rep = await invite_list(alice_ws) - assert isinstance(rep, InviteListRepOk) - - rep = await invite_delete( - alice_ws, token=invitation.token, reason=InvitationDeletedReason.CANCELLED - ) - - assert isinstance(rep, InviteDeleteRepNotFound) - - with pytest.raises(HandshakeBadIdentity): - async with backend_invited_ws_factory( - backend_asgi_app, - organization_id=alice.organization_id, - invitation_type=InvitationType.DEVICE, - token=invitation.token, - ): - pass diff --git a/server/tests/backend/migrations/0001_data.sql b/server/tests/backend/migrations/0001_data.sql deleted file mode 100644 index 9ef60c1d57a..00000000000 --- a/server/tests/backend/migrations/0001_data.sql +++ /dev/null @@ -1,295 +0,0 @@ --- Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS - --- Create organizations - -INSERT INTO organization(_id, organization_id, bootstrap_token, root_verify_key, expiration_date) VALUES ( - 10, - 'CoolOrg', - '123456', - E'\\x1234567890abcdef', - '2021-07-29 10:13:41.699846+00' -); -INSERT INTO organization(_id, organization_id, bootstrap_token, root_verify_key, expiration_date) VALUES ( - 11, - 'NotBootstrappedOrganization', - 'abcdef', - NULL, - NULL -); - --- Create Alice - -INSERT INTO user_( - _id, organization, user_id, is_admin, user_certificate, user_certifier, created_on, - revoked_on, revoked_user_certificate, revoked_user_certifier -) VALUES ( - 20, - 10, - 'alice', - TRUE, - E'\\x1234567890abcdef', - NULL, - '2021-07-29 10:13:41.699846+00', - NULL, - NULL, - NULL -); -INSERT INTO device(_id, organization, user_, device_id, device_certificate, device_certifier, created_on) VALUES ( - 30, - 10, - 20, - 'alice@pc1', - E'\\x1234567890abcdef', - NULL, - '2021-07-29 10:13:41.699846+00' -); -INSERT INTO device_invitation(_id, organization, device_id, creator, created_on) VALUES ( - 50, - 10, - 'alice@pc2', - 30, - '2021-07-29 10:13:41.699846+00' -); -INSERT INTO device(_id, organization, user_, device_id, device_certificate, device_certifier, created_on) VALUES ( - 31, - 10, - 20, - 'alice@pc2', - E'\\x1234567890abcdef', - 30, - '2021-07-29 10:13:41.699846+00' -); - --- Create Bob - -INSERT INTO user_invitation(_id, organization, user_id, creator, created_on) VALUES ( - 40, - 10, - 'bob', - 31, - '2021-07-29 10:13:41.699846+00' -); -INSERT INTO user_( - _id, organization, user_id, is_admin, 
user_certificate, user_certifier, created_on, - revoked_on, revoked_user_certificate, revoked_user_certifier -) VALUES ( - 21, - 10, - 'bob', - FALSE, - E'\\x1234567890abcdef', - 31, - '2021-07-29 10:13:41.699846+00', - '2021-07-29 10:13:41.699846+00', - E'\\x1234567890abcdef', - 30 -); -INSERT INTO device(_id, organization, user_, device_id, device_certificate, device_certifier, created_on) VALUES ( - 32, - 10, - 21, - 'bob@pc1', - E'\\x1234567890abcdef', - 31, - '2021-07-29 10:13:41.699846+00' -); - --- Messages - -INSERT INTO message(_id, organization, recipient, timestamp, index, sender, body) VALUES ( - 100, - 10, - 21, - '2021-07-29 10:13:41.699846+00', - 1, - 31, - E'\\x1234567890abcdef' -); - --- Realms - -INSERT INTO realm(_id, organization, realm_id, encryption_revision, maintenance_started_by, maintenance_started_on, maintenance_type) VALUES ( - 200, - 10, - 'd8602e9b-85ff-4f1e-bdc2-786571470b3d', - 1, - 30, - '2021-07-29 10:13:41.699846+00', - 'REENCRYPTION' -); - -INSERT INTO realm(_id, organization, realm_id, encryption_revision, maintenance_started_by, maintenance_started_on, maintenance_type) VALUES ( - 201, - 10, - '48a8d192-b221-4e74-8acb-7b57c310894e', - 1, - 31, - '2021-07-29 10:13:41.699846+00', - 'GARBAGE_COLLECTION' -); - -INSERT INTO realm(_id, organization, realm_id, encryption_revision, maintenance_started_by, maintenance_started_on, maintenance_type) VALUES ( - 202, - 10, - 'ab1f22f3-1e5c-495c-a53d-2767e4775561', - 1, - NULL, - NULL, - NULL -); - -INSERT INTO realm_user_role(_id, realm, user_, role, certificate, certified_by, certified_on) VALUES ( - 300, - 200, - 20, - 'OWNER', - E'\\x1234567890abcdef', - 30, - '2021-07-29 10:13:41.699846+00' -); - -INSERT INTO realm_user_role(_id, realm, user_, role, certificate, certified_by, certified_on) VALUES ( - 301, - 201, - 20, - 'OWNER', - E'\\x1234567890abcdef', - 30, - '2021-07-29 10:13:41.699846+00' -); - -INSERT INTO realm_user_role(_id, realm, user_, role, certificate, certified_by, certified_on) VALUES ( - 302, - 202, - 20, - 'OWNER', - E'\\x1234567890abcdef', - 30, - '2021-07-29 10:13:41.699846+00' -); - -INSERT INTO realm_user_role(_id, realm, user_, role, certificate, certified_by, certified_on) VALUES ( - 303, - 200, - 21, - 'MANAGER', - E'\\x1234567890abcdef', - 30, - '2021-07-29 10:13:41.699846+00' -); - -INSERT INTO realm_user_role(_id, realm, user_, role, certificate, certified_by, certified_on) VALUES ( - 304, - 200, - 21, - 'CONTRIBUTOR', - E'\\x1234567890abcdef', - 30, - '2021-07-29 10:13:41.699846+00' -); - -INSERT INTO realm_user_role(_id, realm, user_, role, certificate, certified_by, certified_on) VALUES ( - 305, - 200, - 21, - 'READER', - E'\\x1234567890abcdef', - 30, - '2021-07-29 10:13:41.699846+00' -); - -INSERT INTO realm_user_role(_id, realm, user_, role, certificate, certified_by, certified_on) VALUES ( - 306, - 200, - 21, - NULL, - E'\\x1234567890abcdef', - 30, - '2021-07-29 10:13:41.699846+00' -); - --- Vlob - -INSERT INTO vlob_encryption_revision(_id, realm, encryption_revision) VALUES ( - 400, - 200, - 1 -); - -INSERT INTO vlob_atom(_id, organization, vlob_encryption_revision, vlob_id, version, blob, size, author, created_on, deleted_on) VALUES ( - 500, - 10, - 400, - '3458c7ec-626b-41da-b9eb-cf8164baa487', - 1, - E'\\x1234567890abcdef', - 24, - 31, - '2021-07-29 10:13:41.699846+00', - NULL -); - -INSERT INTO vlob_atom(_id, organization, vlob_encryption_revision, vlob_id, version, blob, size, author, created_on, deleted_on) VALUES ( - 501, - 10, - 400, - 
'e0f85b59-78f8-4188-acfc-2e2a51360f4c', - 1, - E'\\x1234567890abcdef', - 24, - 31, - '2021-07-29 10:13:41.699846+00', - '2021-07-29 10:13:41.699846+00' -); - -INSERT INTO realm_vlob_update(_id, realm, index, vlob_atom) VALUES ( - 600, - 200, - 1, - 500 -); - -INSERT INTO realm_vlob_update(_id, realm, index, vlob_atom) VALUES ( - 601, - 200, - 2, - 501 -); - --- Block - -INSERT INTO block(_id, organization, block_id, realm, author, size, created_on, deleted_on) VALUES ( - 700, - 10, - '11d2bdea-5b1d-41ad-9da0-7570a3d666d6', - 200, - 31, - 24, - '2021-07-29 10:13:41.699846+00', - NULL -); - -INSERT INTO block(_id, organization, block_id, realm, author, size, created_on, deleted_on) VALUES ( - 701, - 10, - 'ce461256-b21b-41d1-a589-5a41fb0821cf', - 200, - 31, - 24, - '2021-07-29 10:13:41.699846+00', - '2021-07-29 10:13:41.699846+00' -); - -INSERT INTO block_data(_id, organization_id, block_id, data) VALUES ( - 800, - 'CoolOrg', - '11d2bdea-5b1d-41ad-9da0-7570a3d666d6', - E'\\x1234567890abcdef' -); - -INSERT INTO block_data(_id, organization_id, block_id, data) VALUES ( - 801, - 'CoolOrg', - 'ce461256-b21b-41d1-a589-5a41fb0821cf', - E'\\x1234567890abcdef' -); diff --git a/server/tests/backend/migrations/0003_data.sql b/server/tests/backend/migrations/0003_data.sql deleted file mode 100644 index bcea483f25f..00000000000 --- a/server/tests/backend/migrations/0003_data.sql +++ /dev/null @@ -1,37 +0,0 @@ --- Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS - -INSERT INTO human(_id, organization, email, label) VALUES ( - 3000, - 10, - 'James T. Kirk', - 'kirk@starfleet.com' -); - -INSERT INTO user_( - _id, organization, user_id, human, is_admin, user_certificate, user_certifier, - created_on, revoked_on, revoked_user_certificate, revoked_user_certifier -) VALUES ( - 3001, - 10, - 'kirk', - 3000, - TRUE, - E'\\x1234567890abcdef', - 31, - '2021-07-29 10:13:41.699846+00', - NULL, - NULL, - NULL -); - -INSERT INTO device( - _id, organization, user_, device_id, device_certificate, device_certifier, created_on -) VALUES ( - 3002, - 10, - 3001, - 'kirk@enterprise', - E'\\x1234567890abcdef', - 31, - '2021-07-29 10:13:41.699846+00' -); diff --git a/server/tests/backend/migrations/0004_data.sql b/server/tests/backend/migrations/0004_data.sql deleted file mode 100644 index d3b2a1f7536..00000000000 --- a/server/tests/backend/migrations/0004_data.sql +++ /dev/null @@ -1,199 +0,0 @@ --- Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS - -INSERT INTO invitation( - _id, organization, token, type, greeter, claimer_email, created_on, deleted_on, - deleted_reason, conduit_state, conduit_greeter_payload, conduit_claimer_payload -) VALUES ( - 4000, - 10, - '3458c7ec-626b-41da-b9eb-cf8164baa487', - 'USER', - 20, - 'adam@example.com', - '2021-07-29 10:13:41.699846+00', - NULL, - NULL, - '1_WAIT_PEERS', - NULL, - NULL -); - -INSERT INTO invitation( - _id, organization, token, type, greeter, claimer_email, created_on, deleted_on, - deleted_reason, conduit_state, conduit_greeter_payload, conduit_claimer_payload -) VALUES ( - 4001, - 10, - 'e0f85b59-78f8-4188-acfc-2e2a51360f4c', - 'DEVICE', - 20, - NULL, - '2021-07-29 10:13:41.699846+00', - '2021-07-29 10:13:41.699846+00', - 'FINISHED', - '1_WAIT_PEERS', - NULL, - NULL -); - -INSERT INTO invitation( - _id, organization, token, type, greeter, claimer_email, created_on, deleted_on, - deleted_reason, conduit_state, conduit_greeter_payload, conduit_claimer_payload -) VALUES ( - 4002, - 10, - 
'1027c25b-912d-4a5c-9407-294df9f1b51c', - 'DEVICE', - 20, - NULL, - '2021-07-29 10:13:41.699846+00', - '2021-07-29 10:13:41.699846+00', - 'CANCELLED', - '1_WAIT_PEERS', - NULL, - NULL -); - -INSERT INTO invitation( - _id, organization, token, type, greeter, claimer_email, created_on, deleted_on, - deleted_reason, conduit_state, conduit_greeter_payload, conduit_claimer_payload -) VALUES ( - 4003, - 10, - 'dd0c73c5-16fe-4eb4-8c45-f8a77cb90288', - 'DEVICE', - 20, - NULL, - '2021-07-29 10:13:41.699846+00', - '2021-07-29 10:13:41.699846+00', - 'ROTTEN', - '1_WAIT_PEERS', - E'\\x1234567890abcdef', - E'\\x1234567890abcdef' -); - -INSERT INTO invitation( - _id, organization, token, type, greeter, claimer_email, created_on, deleted_on, - deleted_reason, conduit_state, conduit_greeter_payload, conduit_claimer_payload -) VALUES ( - 4004, - 10, - 'd28a9224-a791-40cb-a44d-42d1cab3e4c0', - 'DEVICE', - 20, - NULL, - '2021-07-29 10:13:41.699846+00', - NULL, - NULL, - '1_WAIT_PEERS', - E'\\x1234567890abcdef', - E'\\x1234567890abcdef' -); - -INSERT INTO invitation( - _id, organization, token, type, greeter, claimer_email, created_on, deleted_on, - deleted_reason, conduit_state, conduit_greeter_payload, conduit_claimer_payload -) VALUES ( - 4005, - 10, - '2decca1f-edfb-4f45-b58a-ff499d0bc5fe', - 'DEVICE', - 20, - NULL, - '2021-07-29 10:13:41.699846+00', - NULL, - NULL, - '2_1_CLAIMER_HASHED_NONCE', - NULL, - E'\\x1234567890abcdef' -); - -INSERT INTO invitation( - _id, organization, token, type, greeter, claimer_email, created_on, deleted_on, - deleted_reason, conduit_state, conduit_greeter_payload, conduit_claimer_payload -) VALUES ( - 4006, - 10, - 'a2b9aa15-7e8b-4144-bfa9-01a9ddde08c6', - 'DEVICE', - 20, - NULL, - '2021-07-29 10:13:41.699846+00', - NULL, - NULL, - '2_2_GREETER_NONCE', - E'\\x1234567890abcdef', - NULL -); - -INSERT INTO invitation( - _id, organization, token, type, greeter, claimer_email, created_on, deleted_on, - deleted_reason, conduit_state, conduit_greeter_payload, conduit_claimer_payload -) VALUES ( - 4007, - 10, - 'ebd5b84e-59f7-4b41-84bd-aa07af174811', - 'DEVICE', - 20, - NULL, - '2021-07-29 10:13:41.699846+00', - NULL, - NULL, - '2_3_CLAIMER_NONCE', - E'\\x1234567890abcdef', - E'\\x1234567890abcdef' -); - -INSERT INTO invitation( - _id, organization, token, type, greeter, claimer_email, created_on, deleted_on, - deleted_reason, conduit_state, conduit_greeter_payload, conduit_claimer_payload -) VALUES ( - 4008, - 10, - 'c6c503ab-a724-4bc0-8f4a-ff80e40e16fa', - 'DEVICE', - 20, - NULL, - '2021-07-29 10:13:41.699846+00', - NULL, - NULL, - '3_1_CLAIMER_TRUST', - E'\\x1234567890abcdef', - NULL -); - -INSERT INTO invitation( - _id, organization, token, type, greeter, claimer_email, created_on, deleted_on, - deleted_reason, conduit_state, conduit_greeter_payload, conduit_claimer_payload -) VALUES ( - 4009, - 10, - '94fe8ad1-ea10-40ab-9b72-f3e13a615d40', - 'DEVICE', - 20, - NULL, - '2021-07-29 10:13:41.699846+00', - NULL, - NULL, - '3_2_GREETER_TRUST', - NULL, - E'\\x1234567890abcdef' -); - -INSERT INTO invitation( - _id, organization, token, type, greeter, claimer_email, created_on, deleted_on, - deleted_reason, conduit_state, conduit_greeter_payload, conduit_claimer_payload -) VALUES ( - 4010, - 10, - '788f0f67-cff9-406d-a8ea-bb59f71fdc15', - 'USER', - 20, - 'adam@example.com', - '2021-07-29 10:13:41.699846+00', - NULL, - NULL, - '4_COMMUNICATE', - E'\\x1234567890abcdef', - E'\\x1234567890abcdef' -); diff --git a/server/tests/backend/migrations/0005_data.sql 
b/server/tests/backend/migrations/0005_data.sql deleted file mode 100644 index 5a569edbf28..00000000000 --- a/server/tests/backend/migrations/0005_data.sql +++ /dev/null @@ -1,67 +0,0 @@ --- Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS - -INSERT INTO user_( - _id, organization, user_id, profile, user_certificate, redacted_user_certificate, - user_certifier, created_on, revoked_on, revoked_user_certificate, revoked_user_certifier -) VALUES ( - 5000, - 10, - 'riri', - 'ADMIN', - E'\\x1234567890abcdef', - E'\\x1234567890abcdef', - 31, - '2021-07-29 10:13:41.699846+00', - NULL, - NULL, - NULL -); - -INSERT INTO user_( - _id, organization, user_id, profile, user_certificate, redacted_user_certificate, - user_certifier, created_on, revoked_on, revoked_user_certificate, revoked_user_certifier -) VALUES ( - 5001, - 10, - 'fifi', - 'STANDARD', - E'\\x1234567890abcdef', - E'\\x1234567890abcdef', - 31, - '2021-07-29 10:13:41.699846+00', - NULL, - NULL, - NULL -); - -INSERT INTO user_( - _id, organization, user_id, profile, user_certificate, redacted_user_certificate, - user_certifier, created_on, revoked_on, revoked_user_certificate, revoked_user_certifier -) VALUES ( - 5002, - 10, - 'loulou', - 'OUTSIDER', - E'\\x1234567890abcdef', - E'\\x1234567890abcdef', - 31, - '2021-07-29 10:13:41.699846+00', - NULL, - NULL, - NULL -); - -INSERT INTO device( - _id, organization, user_, device_id, device_label, device_certificate, - redacted_device_certificate, device_certifier, created_on -) VALUES ( - 5010, - 10, - 5000, - 'riri@pc1', - 'PC 1', - E'\\x1234567890abcdef', - E'\\x1234567890abcdef', - 31, - '2021-07-29 10:13:41.699846+00' -); diff --git a/server/tests/backend/migrations/0006_data.sql b/server/tests/backend/migrations/0006_data.sql deleted file mode 100644 index 1c13266d069..00000000000 --- a/server/tests/backend/migrations/0006_data.sql +++ /dev/null @@ -1,23 +0,0 @@ --- Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS - -INSERT INTO organization( - _id, organization_id, bootstrap_token, root_verify_key, expiration_date, user_profile_outsider_allowed -) VALUES ( - 6000, - 'Org6000', - '123456', - E'\\x1234567890abcdef', - '2021-07-29 10:13:41.699846+00', - TRUE -); - -INSERT INTO organization( - _id, organization_id, bootstrap_token, root_verify_key, expiration_date, user_profile_outsider_allowed -) VALUES ( - 6001, - 'Org6001', - '123456', - E'\\x1234567890abcdef', - '2021-07-29 10:13:41.699846+00', - FALSE -); diff --git a/server/tests/backend/migrations/0007_data.sql b/server/tests/backend/migrations/0007_data.sql deleted file mode 100644 index c59a1204f0f..00000000000 --- a/server/tests/backend/migrations/0007_data.sql +++ /dev/null @@ -1,27 +0,0 @@ --- Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS - -INSERT INTO organization( - _id, organization_id, bootstrap_token, root_verify_key, - expiration_date, user_profile_outsider_allowed, active_users_limit -) VALUES ( - 7000, - 'Org7000', - '123456', - E'\\x1234567890abcdef', - '2021-07-29 10:13:41.699846+00', - TRUE, - 42 -); - -INSERT INTO organization( - _id, organization_id, bootstrap_token, root_verify_key, - expiration_date, user_profile_outsider_allowed, active_users_limit -) VALUES ( - 7001, - 'Org7001', - '123456', - E'\\x1234567890abcdef', - '2021-07-29 10:13:41.699846+00', - TRUE, - 0 -); diff --git a/server/tests/backend/migrations/0008_data.sql b/server/tests/backend/migrations/0008_data.sql deleted file mode 100644 index 
3eff77de26f..00000000000 --- a/server/tests/backend/migrations/0008_data.sql +++ /dev/null @@ -1,69 +0,0 @@ --- Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS - -INSERT INTO organization( - _id, organization_id, bootstrap_token, root_verify_key, - user_profile_outsider_allowed, active_users_limit, is_expired, - _expired_on, _bootstrapped_on, _created_on -) VALUES ( - 8000, - 'Org8000', - '123456', - E'\\x1234567890abcdef', - TRUE, - NULL, - TRUE, - '2021-07-29 10:13:41.699846+00', - '2021-07-29 10:13:41.699846+00', - '2021-07-29 10:13:41.699846+00' -); - -INSERT INTO organization( - _id, organization_id, bootstrap_token, root_verify_key, - user_profile_outsider_allowed, active_users_limit, is_expired, - _expired_on, _bootstrapped_on, _created_on -) VALUES ( - 8001, - 'Org8001', - '123456', - E'\\x1234567890abcdef', - TRUE, - NULL, - FALSE, - NULL, - NULL, - '2021-07-29 10:13:41.699846+00' -); - -INSERT INTO organization( - _id, organization_id, bootstrap_token, root_verify_key, - user_profile_outsider_allowed, active_users_limit, is_expired, - _expired_on, _bootstrapped_on, _created_on -) VALUES ( - 8002, - 'Org8002', - '123456', - E'\\x1234567890abcdef', - TRUE, - NULL, - FALSE, - NULL, - '2021-07-29 10:13:41.699846+00', - '2021-07-29 10:13:41.699846+00' -); - -INSERT INTO organization( - _id, organization_id, bootstrap_token, root_verify_key, - user_profile_outsider_allowed, active_users_limit, is_expired, - _expired_on, _bootstrapped_on, _created_on -) VALUES ( - 8003, - 'Org8003', - '123456', - E'\\x1234567890abcdef', - TRUE, - NULL, - TRUE, - '2021-07-29 10:13:41.699846+00', - NULL, - '2021-07-29 10:13:41.699846+00' -); diff --git a/server/tests/backend/migrations/0009_data.sql b/server/tests/backend/migrations/0009_data.sql deleted file mode 100644 index 65d76af6578..00000000000 --- a/server/tests/backend/migrations/0009_data.sql +++ /dev/null @@ -1,53 +0,0 @@ --- Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS - --- Updating a role now updates the realm_user_change table --- Here, alice@pc1 (user 20 device 30) grants contributor access to bob (user 21) to the realm 200 - -INSERT INTO realm_user_role(_id, realm, user_, role, certificate, certified_by, certified_on) VALUES ( - 307, - 200, - 21, - 'CONTRIBUTOR', - E'\\x1234567890abcdef', - 30, - '2021-10-29 11:58:08.841265+02' -); - -INSERT INTO realm_user_change(_id, realm, user_, last_role_change, last_vlob_update) VALUES ( - 2, - 200, - 20, - '2021-10-29 11:58:08.841265+02', - NULL -); - --- Writing a vlob now updates the realm_user_change table --- Here, bob@pc1 (user 21 device 32) writes a vlob to the realm 200 - -INSERT INTO vlob_atom(_id, organization, vlob_encryption_revision, vlob_id, version, blob, size, author, created_on, deleted_on) VALUES ( - 502, - 10, - 400, - '3458c7ec-626b-41da-b9eb-cf8164baa487', - 2, - E'\\x1234567890abcdef', - 24, - 32, - '2021-10-29 11:30:16.791954+02', - NULL -); - -INSERT INTO realm_vlob_update(_id, realm, index, vlob_atom) VALUES ( - 602, - 200, - 3, - 502 -); - -INSERT INTO realm_user_change(_id, realm, user_, last_role_change, last_vlob_update) VALUES ( - 1, - 200, - 21, - NULL, - '2021-10-29 11:30:16.791954+02' -); diff --git a/server/tests/backend/migrations/0010_data.sql b/server/tests/backend/migrations/0010_data.sql deleted file mode 100644 index 8cdd4350ada..00000000000 --- a/server/tests/backend/migrations/0010_data.sql +++ /dev/null @@ -1,62 +0,0 @@ --- Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 
2016-present Scille SAS - --- pki_certificate_submitted -INSERT INTO pki_enrollment(_id, organization, enrollment_id, submitter_der_x509_certificate, submitter_der_x509_certificate_sha1, submit_payload_signature, submit_payload, enrollment_state, submitted_on) VALUES( - 6, - 10, - '6ecd8c99-4036-403d-bf84-cf8400f67836', - E'', - E'', - E'', - E'', - 'SUBMITTED', - '2021-10-29 11:30:16.791954+02' -); - --- pki_certificate_cancelled -INSERT INTO pki_enrollment(_id, organization, enrollment_id, submitter_der_x509_certificate, submitter_der_x509_certificate_sha1, submit_payload_signature, submit_payload, enrollment_state, submitted_on, info_cancelled.cancelled_on) VALUES( - 7, - 10, - '6ecd8c99-4036-403d-bf84-cf8400f67837', - E'', - E'', - E'', - E'', - 'CANCELLED', - '2021-10-29 11:30:16.791954+02', - '2021-10-29 11:30:16.791954+02' -); - --- pki_certificate_rejected -INSERT INTO pki_enrollment(_id, organization, enrollment_id, submitter_der_x509_certificate, submitter_der_x509_certificate_sha1, submit_payload_signature, submit_payload, enrollment_state, submitted_on, info_rejected.rejected_on) VALUES( - 8, - 10, - '6ecd8c99-4036-403d-bf84-cf8400f67838', - E'', - E'', - E'', - E'', - 'REJECTED', - '2021-10-29 11:30:16.791954+02', - '2021-10-29 11:30:16.791954+02' -); - - --- pki_certificate_accepted -INSERT INTO pki_enrollment(_id, organization, enrollment_id, submitter_der_x509_certificate, submitter_der_x509_certificate_sha1, submit_payload_signature, submit_payload, enrollment_state, submitted_on, info_accepted.accepted_on, info_accepted.accepter_der_x509_certificate, info_accepted.accept_payload_signature, info_accepted.accept_payload) VALUES( - 9, - 10, - '6ecd8c99-4036-403d-bf84-cf8400f67839', - E'', - E'', - E'', - E'', - 'ACCEPTED', - '2021-10-29 11:30:16.791954+02', - '2021-10-29 11:30:16.791954+02', - E'', - E'', - E'' -); - --- TODO add test accepted and accepter diff --git a/server/tests/backend/migrations/0011_data.sql b/server/tests/backend/migrations/0011_data.sql deleted file mode 100644 index 89e523dde6d..00000000000 --- a/server/tests/backend/migrations/0011_data.sql +++ /dev/null @@ -1,56 +0,0 @@ --- Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS - - -INSERT INTO organization( - _id, organization_id, bootstrap_token, root_verify_key, - user_profile_outsider_allowed, active_users_limit, is_expired, - _expired_on, _bootstrapped_on, _created_on, - sequester_authority_certificate, sequester_authority_verify_key_der -) VALUES ( - 11000, - 'Org11000', - '123456', - E'\\x1234567890abcdef', - TRUE, - NULL, - TRUE, - '2021-07-29 10:13:41.699846+00', - '2021-07-29 10:13:41.699846+00', - '2021-07-29 10:13:41.699846+00', - E'\\x1234567890abcdef', - E'\\x1234567890abcdef' -); - - -INSERT INTO sequester_service( - _id, service_id, organization, service_certificate, service_label, created_on -) VALUES ( - 11000, - '6ecd8c99-4036-403d-bf84-cf8400f67836', - 11000, - E'\\x1234567890abcdef', - 'service', - '2021-07-29 10:13:41.699846+00' -); - - -INSERT INTO sequester_service( - _id, service_id, organization, service_certificate, service_label, created_on, disabled_on -) VALUES ( - 11001, - '6ecd8c99-4036-403d-bf84-cf8400f67831', - 11000, - E'\\x1234567890abcdef', - 'disabled_service', - '2021-07-29 10:13:41.699846+00', - '2021-07-29 10:13:41.699846+00' -); - -INSERT INTO sequester_service_vlob_atom( - _id, vlob_atom, service, blob -) VALUES ( - 11000, - 502, - 11000, - E'\\x1234567890abcdef' -); diff --git a/server/tests/backend/migrations/0012_data.sql 
b/server/tests/backend/migrations/0012_data.sql deleted file mode 100644 index d0d9f1ae740..00000000000 --- a/server/tests/backend/migrations/0012_data.sql +++ /dev/null @@ -1,38 +0,0 @@ --- Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS - - -INSERT INTO sequester_service( - _id, service_id, organization, service_certificate, service_label, created_on, service_type -) VALUES ( - 12000, - '6ecd8c99-4036-403d-bf84-cf8400f67864', - 11000, - E'\\x1234567890abcdef', - 'service', - '2021-07-29 10:13:41.699846+00', - 'STORAGE' -); - - -INSERT INTO sequester_service( - _id, service_id, organization, service_certificate, service_label, created_on, disabled_on, webhook_url, service_type -) VALUES ( - 12001, - '6ecd8c99-4036-403d-bf84-cf8400f67814', - 11000, - E'\\x1234567890abcdef', - 'disabled_service', - '2021-07-29 10:13:41.699846+00', - '2021-07-29 10:13:41.699846+00', - 'http://somewhere.lost', - 'WEBHOOK' -); - -INSERT INTO sequester_service_vlob_atom( - _id, vlob_atom, service, blob -) VALUES ( - 12000, - 502, - 12001, - E'\\x1234567890abcdef' -); diff --git a/server/tests/backend/migrations/__init__.py b/server/tests/backend/migrations/__init__.py deleted file mode 100644 index 05e02a3b569..00000000000 --- a/server/tests/backend/migrations/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS diff --git a/server/tests/backend/migrations/test_migrations.py b/server/tests/backend/migrations/test_migrations.py deleted file mode 100644 index e5b66da3952..00000000000 --- a/server/tests/backend/migrations/test_migrations.py +++ /dev/null @@ -1,166 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -import importlib.resources -import re -from contextlib import contextmanager - -import pytest -import trio -import trio_asyncio -import triopg -from asyncpg.cluster import TempCluster - -from parsec.backend.postgresql import migrations as migrations_module -from parsec.backend.postgresql.handler import ( - MIGRATION_FILE_PATTERN, - apply_migrations, - retrieve_migrations, -) - - -@contextmanager -def pg_cluster_factory(data_dir): - # TempCluster internally creates a new asyncio loop (to test the DB - # connection) which is not compatible with trio_asyncio, so we must - # run this from a non-trio context - pg_cluster = TempCluster(data_dir_parent=data_dir) - # Make the default superuser name stable. 
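Before the cluster factory continues, a note on the `NNNN_data.sql` files deleted above: they are not migrations but data patches. `collect_data_patches` (further down) keys each one by its numeric prefix and executes it right after the migration with the same index, so every later migration runs against representative rows. A rough sketch of that filename-to-index mapping; the regex here is a hypothetical stand-in, the real `MIGRATION_FILE_PATTERN` lives in `parsec.backend.postgresql.handler`:

```python
# Hypothetical stand-in for MIGRATION_FILE_PATTERN (the real pattern may
# differ): map a data-patch name such as `0004_data.sql` to the index of
# the migration it follows.
import re

PATCH_PATTERN = r"^(?P<id>\d{4})_\w+\.sql$"

for name in ("0001_data.sql", "0004_data.sql", "datamodel.sql"):
    match = re.match(PATCH_PATTERN, name)
    print(name, "->", int(match.group("id")) if match else "no patch index")
```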
- print("Creating PostgreSQL cluster...", end="", flush=True) - pg_cluster.init(username="postgres") - pg_cluster.trust_local_connections() - pg_cluster.start(port="dynamic", server_settings={}) - print("Done !") - - try: - yield pg_cluster - - finally: - if pg_cluster.get_status() == "running": - pg_cluster.stop() - if pg_cluster.get_status() != "not-initialized": - pg_cluster.destroy() - - -def url_from_pg_cluster(pg_cluster): - spec = pg_cluster.get_connection_spec() - return f"postgresql://postgres@localhost:{spec['port']}/postgres" - - -def collect_data_patches(): - from tests.backend import migrations as migrations_test_module - - patches = {} - for file in importlib.resources.files(migrations_test_module).iterdir(): - file_name = file.name - match = re.search(MIGRATION_FILE_PATTERN, file_name) - if match: - idx = int(match.group("id")) - assert idx not in patches # Sanity check - sql = importlib.resources.files(migrations_test_module).joinpath(file_name).read_text() - patches[idx] = sql - - return patches - - -@pytest.mark.postgresql -def test_migrations(tmp_path): - # Use our own cluster to isolate this test from the others (given - # otherwise it failure would most likely provoke inconsistent DB schemas - # errors in any subsequent tests) - with pg_cluster_factory(tmp_path) as pg_cluster: - postgresql_url = url_from_pg_cluster(pg_cluster) - pg_dump = pg_cluster._find_pg_binary("pg_dump") - psql = pg_cluster._find_pg_binary("psql") - - # Need to run trio loop after pg_cluster is ready - trio_asyncio.run(_trio_test_migration, postgresql_url, pg_dump, psql) - - -async def _trio_test_migration(postgresql_url, pg_dump, psql): - async def reset_db_schema(): - # Now drop the database clean... - async with triopg.connect(postgresql_url) as conn: - rep = await conn.execute("DROP SCHEMA public CASCADE") - assert rep == "DROP SCHEMA" - rep = await conn.execute("CREATE SCHEMA public") - assert rep == "CREATE SCHEMA" - - async def dump_schema() -> str: - cmd = [pg_dump, "--schema=public", "--schema-only", postgresql_url] - print(f"run: {' '.join(cmd)}") - process = await trio.run_process(cmd, capture_stdout=True) - return process.stdout.decode() - - async def dump_data() -> str: - cmd = [pg_dump, "--schema=public", "--data-only", postgresql_url] - print(f"run: {' '.join(cmd)}") - process = await trio.run_process(cmd, capture_stdout=True) - return process.stdout.decode() - - async def restore_data(data: str) -> None: - cmd = [psql, "--no-psqlrc", "--set", "ON_ERROR_STOP=on", postgresql_url] - print(f"run: {' '.join(cmd)}") - process = await trio.run_process(cmd, capture_stdout=True, stdin=data.encode()) - return process.stdout.decode() - - # The schema may start with an automatic comment, something like: - # `COMMENT ON SCHEMA public IS 'standard public schema';` - # So we clean everything first - await reset_db_schema() - - # Now we apply migrations one after another and also insert the provided data - migrations = retrieve_migrations() - patches = collect_data_patches() - async with triopg.connect(postgresql_url) as conn: - for migration in migrations: - result = await apply_migrations(postgresql_url, [migration], dry_run=False) - assert not result.error - patch_sql = patches.get(migration.idx, "") - if patch_sql: - await conn.execute(patch_sql) - - # Save the final state of the database schema - schema_from_migrations = await dump_schema() - - # Also save the final data - data_from_migrations = await dump_data() - - # Now drop the database clean... 
- await reset_db_schema() - - # ...and reinitialize it with the current datamodel script - sql = importlib.resources.files(migrations_module).joinpath("datamodel.sql").read_text() - async with triopg.connect(postgresql_url) as conn: - await conn.execute(sql) - - # The resulting database schema should be equivalent to what we add after - # all the migrations - schema_from_init = await dump_schema() - assert schema_from_init == schema_from_migrations - - # Final check is to re-import all the data, this requires some cooking first: - - # Remove the migration related data given their should already be in the database - data_from_migrations = re.sub( - r"COPY public.migration[^\\]*\\.", "", data_from_migrations, flags=re.DOTALL - ) - # Modify the foreign key constraint between `user_` and `device` to be - # checked at the end of the transaction. This is needed because `user_` - # table is entirely populated before switching to `device`. So the - # constraint should break as soon as an `user_` row references a device_id. - data_from_migrations = f""" -ALTER TABLE public."user_" ALTER CONSTRAINT fk_user_device_user_certifier DEFERRABLE; -ALTER TABLE public."user_" ALTER CONSTRAINT fk_user_device_revoked_user_certifier DEFERRABLE; - -BEGIN; - -SET CONSTRAINTS fk_user_device_user_certifier DEFERRED; -SET CONSTRAINTS fk_user_device_revoked_user_certifier DEFERRED; - -{data_from_migrations} - -COMMIT; -""" - - await restore_data(data_from_migrations) diff --git a/server/tests/backend/organization/__init__.py b/server/tests/backend/organization/__init__.py deleted file mode 100644 index 05e02a3b569..00000000000 --- a/server/tests/backend/organization/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS diff --git a/server/tests/backend/organization/test_bootstrap.py b/server/tests/backend/organization/test_bootstrap.py deleted file mode 100644 index ff37d12a09e..00000000000 --- a/server/tests/backend/organization/test_bootstrap.py +++ /dev/null @@ -1,71 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -import pytest - -from parsec.api.protocol import ( - OrganizationBootstrapRepInvalidData, - OrganizationBootstrapRepOk, -) -from tests.backend.common import organization_bootstrap -from tests.common import ( - LocalDevice, - OrganizationFullData, - customize_fixtures, - local_device_to_backend_user, -) - - -@pytest.mark.trio -@customize_fixtures(backend_not_populated=True) -async def test_sequestered_organization_bootstrap( - coolorg: OrganizationFullData, - other_org: OrganizationFullData, - alice: LocalDevice, - anonymous_backend_ws, - backend, -): - # Create organization - org_token = "123456" - await backend.organization.create(id=coolorg.organization_id, bootstrap_token=org_token) - - backend_alice, backend_alice_first_device = local_device_to_backend_user(alice, coolorg) - - organization_bootstrap_args = { - "bootstrap_token": org_token, - "root_verify_key": coolorg.root_verify_key, - "user_certificate": backend_alice.user_certificate, - "device_certificate": backend_alice_first_device.device_certificate, - "redacted_user_certificate": backend_alice.redacted_user_certificate, - "redacted_device_certificate": backend_alice_first_device.redacted_device_certificate, - "sequester_authority_certificate": None, - } - - # Not redacted vs redacted errors in user/device certificates - for field, value in [ - ("redacted_user_certificate", 
backend_alice.user_certificate), - ("redacted_device_certificate", backend_alice_first_device.device_certificate), - ("user_certificate", backend_alice.redacted_user_certificate), - ("device_certificate", backend_alice_first_device.redacted_device_certificate), - ]: - rep = await organization_bootstrap( - anonymous_backend_ws, - check_rep=False, - **{ - **organization_bootstrap_args, - field: value, - }, - ) - assert isinstance(rep, OrganizationBootstrapRepInvalidData) - - # TODO: test timestamp not in the ballpark - # TODO: test timestamp mismatch between certificates - # TODO: test author mismatch between certificates - # TODO: test invalid profil in user certificate - # TODO: test redacted and non redacted user/device certificates mismatch - - # Finally valid bootstrap - rep = await organization_bootstrap( - anonymous_backend_ws, check_rep=False, **organization_bootstrap_args - ) - assert isinstance(rep, OrganizationBootstrapRepOk) diff --git a/server/tests/backend/organization/test_config.py b/server/tests/backend/organization/test_config.py deleted file mode 100644 index a4972afc994..00000000000 --- a/server/tests/backend/organization/test_config.py +++ /dev/null @@ -1,82 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -import pytest - -from parsec._parsec import ActiveUsersLimit -from parsec.api.protocol import OrganizationConfigRepOk -from tests.backend.common import organization_config -from tests.common import OrganizationFullData, customize_fixtures, sequester_service_factory - - -@pytest.mark.trio -async def test_organization_config_ok(coolorg: OrganizationFullData, alice_ws, backend): - rep = await organization_config(alice_ws) - assert rep == OrganizationConfigRepOk( - user_profile_outsider_allowed=True, - active_users_limit=backend.config.organization_initial_active_users_limit, - sequester_authority_certificate=None, - sequester_services_certificates=None, - ) - - await backend.organization.update( - id=coolorg.organization_id, - user_profile_outsider_allowed=False, - active_users_limit=ActiveUsersLimit.LimitedTo(42), - ) - rep = await organization_config(alice_ws) - assert rep == OrganizationConfigRepOk( - user_profile_outsider_allowed=False, - active_users_limit=ActiveUsersLimit.LimitedTo(42), - sequester_authority_certificate=None, - sequester_services_certificates=None, - ) - - -@pytest.mark.trio -@customize_fixtures(coolorg_is_sequestered_organization=True) -async def test_organization_config_ok_sequestered_organization( - coolorg: OrganizationFullData, alice_ws, backend -): - rep = await organization_config(alice_ws) - assert rep == OrganizationConfigRepOk( - user_profile_outsider_allowed=True, - active_users_limit=backend.config.organization_initial_active_users_limit, - sequester_authority_certificate=coolorg.sequester_authority.certif, - sequester_services_certificates=[], - ) - - # Add new services - s1 = sequester_service_factory( - authority=coolorg.sequester_authority, label="Sequester service 1" - ) - await backend.sequester.create_service( - organization_id=coolorg.organization_id, service=s1.backend_service - ) - s2 = sequester_service_factory( - authority=coolorg.sequester_authority, label="Sequester service 2" - ) - await backend.sequester.create_service( - organization_id=coolorg.organization_id, service=s2.backend_service - ) - - rep = await organization_config(alice_ws) - assert rep == OrganizationConfigRepOk( - user_profile_outsider_allowed=True, - 
active_users_limit=backend.config.organization_initial_active_users_limit, - sequester_authority_certificate=coolorg.sequester_authority.certif, - sequester_services_certificates=[s1.certif, s2.certif], - ) - - # Delete a service, should no longer appear in config - await backend.sequester.disable_service( - organization_id=coolorg.organization_id, service_id=s1.service_id - ) - - rep = await organization_config(alice_ws) - assert rep == OrganizationConfigRepOk( - user_profile_outsider_allowed=True, - active_users_limit=backend.config.organization_initial_active_users_limit, - sequester_authority_certificate=coolorg.sequester_authority.certif, - sequester_services_certificates=[s2.certif], - ) diff --git a/server/tests/backend/organization/test_stats.py b/server/tests/backend/organization/test_stats.py deleted file mode 100644 index c5e2a60fd93..00000000000 --- a/server/tests/backend/organization/test_stats.py +++ /dev/null @@ -1,187 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -import pytest - -from parsec._parsec import DateTime, UsersPerProfileDetailItem -from parsec.api.protocol import BlockID, OrganizationStatsRepOk, UserProfile, VlobID -from tests.backend.common import organization_stats -from tests.common import customize_fixtures - - -@pytest.mark.trio -async def test_organization_stats_data(alice_ws, realm, realm_factory, alice, backend): - stats = await organization_stats(alice_ws) - initial_metadata_size = stats.metadata_size - assert stats == OrganizationStatsRepOk( - data_size=0, - metadata_size=initial_metadata_size, - users=3, - active_users=3, - users_per_profile_detail=[ - UsersPerProfileDetailItem(profile=UserProfile.ADMIN, active=2, revoked=0), - UsersPerProfileDetailItem(profile=UserProfile.STANDARD, active=1, revoked=0), - UsersPerProfileDetailItem(profile=UserProfile.OUTSIDER, active=0, revoked=0), - ], - realms=4, - ) - - # Create new metadata - await backend.vlob.create( - organization_id=alice.organization_id, - author=alice.device_id, - realm_id=realm, - encryption_revision=1, - vlob_id=VlobID.new(), - timestamp=DateTime.now(), - blob=b"1234", - ) - stats = await organization_stats(alice_ws) - assert stats == OrganizationStatsRepOk( - data_size=0, - metadata_size=initial_metadata_size + 4, - users=3, - active_users=3, - users_per_profile_detail=[ - UsersPerProfileDetailItem(profile=UserProfile.ADMIN, active=2, revoked=0), - UsersPerProfileDetailItem(profile=UserProfile.STANDARD, active=1, revoked=0), - UsersPerProfileDetailItem(profile=UserProfile.OUTSIDER, active=0, revoked=0), - ], - realms=4, - ) - - # Create new data - await backend.block.create( - organization_id=alice.organization_id, - author=alice.device_id, - block_id=BlockID.new(), - realm_id=realm, - block=b"1234", - ) - stats = await organization_stats(alice_ws) - assert stats == OrganizationStatsRepOk( - data_size=4, - metadata_size=initial_metadata_size + 4, - users=3, - active_users=3, - users_per_profile_detail=[ - UsersPerProfileDetailItem(profile=UserProfile.ADMIN, active=2, revoked=0), - UsersPerProfileDetailItem(profile=UserProfile.STANDARD, active=1, revoked=0), - UsersPerProfileDetailItem(profile=UserProfile.OUTSIDER, active=0, revoked=0), - ], - realms=4, - ) - - # create new workspace - await realm_factory(backend, alice) - stats = await organization_stats(alice_ws) - assert stats == OrganizationStatsRepOk( - data_size=4, - metadata_size=initial_metadata_size + 4, - users=3, - active_users=3, - 
users_per_profile_detail=[ - UsersPerProfileDetailItem(profile=UserProfile.ADMIN, active=2, revoked=0), - UsersPerProfileDetailItem(profile=UserProfile.STANDARD, active=1, revoked=0), - UsersPerProfileDetailItem(profile=UserProfile.OUTSIDER, active=0, revoked=0), - ], - realms=5, - ) - - -@pytest.mark.trio -@customize_fixtures(backend_not_populated=True) -async def test_organization_stats_users( - backend_asgi_app, - backend_data_binder_factory, - organization_factory, - local_device_factory, - other_org, - backend_authenticated_ws_factory, -): - binder = backend_data_binder_factory(backend_asgi_app.backend) - org = organization_factory("IFD") - godfrey1 = local_device_factory( - org=org, - base_device_id="godfrey@d1", - base_human_handle="Godfrey Ho ", - profile=UserProfile.ADMIN, - ) - await binder.bind_organization(org, godfrey1, initial_user_manifest="not_synced") - - expected_stats = OrganizationStatsRepOk( - users=1, - active_users=1, - users_per_profile_detail=[ - UsersPerProfileDetailItem(profile=UserProfile.ADMIN, active=1, revoked=0), - UsersPerProfileDetailItem(profile=UserProfile.STANDARD, active=0, revoked=0), - UsersPerProfileDetailItem(profile=UserProfile.OUTSIDER, active=0, revoked=0), - ], - data_size=0, - metadata_size=0, - realms=0, - ) - - async with backend_authenticated_ws_factory(backend_asgi_app, godfrey1) as sock: - for profile in UserProfile.VALUES: - i = [ - i - for i, v in enumerate(expected_stats.users_per_profile_detail) - if v.profile == profile - ][0] - device = local_device_factory(profile=profile, org=org) - await binder.bind_device(device, certifier=godfrey1, initial_user_manifest="not_synced") - expected_stats = OrganizationStatsRepOk( - users=expected_stats.users + 1, - active_users=expected_stats.active_users + 1, - users_per_profile_detail=[ - UsersPerProfileDetailItem( - profile=v.profile, active=v.active + 1, revoked=v.revoked - ) - if i == j - else v - for j, v in enumerate(expected_stats.users_per_profile_detail) - ], - data_size=expected_stats.data_size, - metadata_size=expected_stats.metadata_size, - realms=expected_stats.realms, - ) - stats = await organization_stats(sock) - assert stats == expected_stats - - await binder.bind_revocation(device.user_id, certifier=godfrey1) - expected_stats = OrganizationStatsRepOk( - users=expected_stats.users, - active_users=expected_stats.active_users - 1, - users_per_profile_detail=[ - UsersPerProfileDetailItem( - profile=v.profile, active=v.active - 1, revoked=v.revoked + 1 - ) - if i == j - else v - for j, v in enumerate(expected_stats.users_per_profile_detail) - ], - data_size=expected_stats.data_size, - metadata_size=expected_stats.metadata_size, - realms=expected_stats.realms, - ) - stats = await organization_stats(sock) - assert stats == expected_stats - - # Also make sure stats are isolated between organizations - other_org_device = local_device_factory(org=other_org, profile=UserProfile.ADMIN) - await binder.bind_organization(other_org, other_org_device, initial_user_manifest="not_synced") - async with backend_authenticated_ws_factory(backend_asgi_app, other_org_device) as sock: - stats = await organization_stats(sock) - assert stats == OrganizationStatsRepOk( - users=1, - active_users=1, - users_per_profile_detail=[ - UsersPerProfileDetailItem(profile=UserProfile.ADMIN, active=1, revoked=0), - UsersPerProfileDetailItem(profile=UserProfile.STANDARD, active=0, revoked=0), - UsersPerProfileDetailItem(profile=UserProfile.OUTSIDER, active=0, revoked=0), - ], - data_size=0, - metadata_size=0, - realms=0, 
- ) diff --git a/server/tests/backend/realm/__init__.py b/server/tests/backend/realm/__init__.py deleted file mode 100644 index 05e02a3b569..00000000000 --- a/server/tests/backend/realm/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS diff --git a/server/tests/backend/realm/test_base.py b/server/tests/backend/realm/test_base.py deleted file mode 100644 index d7e911f1748..00000000000 --- a/server/tests/backend/realm/test_base.py +++ /dev/null @@ -1,43 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -import pytest - -from parsec._parsec import DateTime -from parsec.api.data import RealmRoleCertificate -from parsec.api.protocol import RealmRole, RealmStatusRepNotAllowed, RealmStatusRepOk -from tests.backend.common import realm_status, realm_update_roles - - -@pytest.mark.trio -async def test_status(bob_ws, alice_ws, alice, bob, realm): - rep = await realm_status(alice_ws, realm) - assert rep == RealmStatusRepOk( - in_maintenance=False, - maintenance_type=None, - maintenance_started_by=None, - maintenance_started_on=None, - encryption_revision=1, - ) - # Cheap test on no access - rep = await realm_status(bob_ws, realm) - assert isinstance(rep, RealmStatusRepNotAllowed) - # Also test lesser role have access - await realm_update_roles( - alice_ws, - RealmRoleCertificate( - author=alice.device_id, - timestamp=DateTime.now(), - realm_id=realm, - user_id=bob.user_id, - role=RealmRole.READER, - ).dump_and_sign(alice.signing_key), - ) - rep = await realm_status(bob_ws, realm) - assert rep == RealmStatusRepOk( - in_maintenance=False, - maintenance_type=None, - maintenance_started_by=None, - maintenance_started_on=None, - encryption_revision=1, - ) diff --git a/server/tests/backend/realm/test_block.py b/server/tests/backend/realm/test_block.py deleted file mode 100644 index e1f2f7d9215..00000000000 --- a/server/tests/backend/realm/test_block.py +++ /dev/null @@ -1,480 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -import msgpack -import pytest -import trio -from hypothesis import given -from hypothesis import strategies as st - -from parsec._parsec import ( - DateTime, - authenticated_cmds, -) -from parsec.api.protocol import ( - BlockCreateRepAlreadyExists, - BlockCreateRepInMaintenance, - BlockCreateRepNotAllowed, - BlockCreateRepOk, - BlockCreateRepTimeout, - BlockID, - BlockReadRepNotAllowed, - BlockReadRepNotFound, - BlockReadRepOk, - BlockReadRepTimeout, - RealmRole, - VlobID, -) -from parsec.backend.block import BlockStoreError -from parsec.backend.raid5_blockstore import ( - generate_checksum_chunk, - rebuild_block_from_chunks, - split_block_in_chunks, -) -from parsec.backend.realm import RealmGrantedRole -from tests.backend.common import block_create, block_read -from tests.common import customize_fixtures - -BLOCK_ID = BlockID.from_hex("00000000000000000000000000000001") -VLOB_ID = VlobID.from_hex("00000000000000000000000000000002") -BLOCK_DATA = b"Hodi ho !" 
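The RAID5 tests below lean on the helpers imported above (`split_block_in_chunks`, `generate_checksum_chunk`, `rebuild_block_from_chunks`): a block is spread over N blockstores plus one XOR checksum, so a single failed node can be tolerated on read, while two failures cannot. A minimal sketch of that principle, assuming equal-sized chunks; this illustrates the idea only and is not parsec's actual implementation:

```python
# XOR-parity sketch of the RAID5 idea (see parsec.backend.raid5_blockstore
# for the real code, which also tracks the original block size).
def xor_bytes(a: bytes, b: bytes) -> bytes:
    return bytes(x ^ y for x, y in zip(a, b))

def split(block: bytes, nb_data_chunks: int) -> list[bytes]:
    # Pad so every chunk has the same size, then append the XOR checksum
    chunk_size = -(-len(block) // nb_data_chunks)  # ceiling division
    padded = block.ljust(chunk_size * nb_data_chunks, b"\x00")
    chunks = [padded[i * chunk_size : (i + 1) * chunk_size] for i in range(nb_data_chunks)]
    checksum = chunks[0]
    for chunk in chunks[1:]:
        checksum = xor_bytes(checksum, chunk)
    return chunks + [checksum]

def rebuild(chunks: list) -> list:
    # At most one chunk may be missing (None); XOR of the others restores it
    missing = [i for i, c in enumerate(chunks) if c is None]
    assert len(missing) <= 1, "RAID5 tolerates a single failed node"
    if missing:
        rest = [c for c in chunks if c is not None]
        restored = rest[0]
        for c in rest[1:]:
            restored = xor_bytes(restored, c)
        chunks[missing[0]] = restored
    return chunks[:-1]  # drop the checksum chunk

parts = split(b"Hodi ho !", nb_data_chunks=2)
parts[1] = None  # simulate one failed blockstore node
assert b"".join(rebuild(parts)).rstrip(b"\x00") == b"Hodi ho !"
```

This also explains the shape of the failure tests that follow: one mocked `BlockStoreError` still yields `BlockReadRepOk`, while two failing nodes push the read into `BlockReadRepTimeout`.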
- - -@pytest.fixture -async def block(backend, alice, realm): - block_id = BlockID.from_hex("0000000000000000000000000000000C") - - await backend.block.create( - organization_id=alice.organization_id, - author=alice.device_id, - block_id=block_id, - realm_id=realm, - block=BLOCK_DATA, - ) - return block_id - - -@pytest.mark.trio -async def test_block_read_check_access_rights( - backend, alice, bob, bob_ws, realm, block, next_timestamp -): - # User not part of the realm - rep = await block_read(bob_ws, block) - assert isinstance(rep, BlockReadRepNotAllowed) - - # User part of the realm with various role - for role in RealmRole.VALUES: - await backend.realm.update_roles( - alice.organization_id, - RealmGrantedRole( - certificate=b"", - realm_id=realm, - user_id=bob.user_id, - role=role, - granted_by=alice.device_id, - granted_on=next_timestamp(), - ), - ) - rep = await block_read(bob_ws, block) - assert rep == BlockReadRepOk(BLOCK_DATA) - - # Ensure user that used to be part of the realm have no longer access - await backend.realm.update_roles( - alice.organization_id, - RealmGrantedRole( - certificate=b"", - realm_id=realm, - user_id=bob.user_id, - role=None, - granted_by=alice.device_id, - granted_on=next_timestamp(), - ), - ) - rep = await block_read(bob_ws, block) - assert isinstance(rep, BlockReadRepNotAllowed) - - -@pytest.mark.trio -async def test_block_create_check_access_rights(backend, alice, bob, bob_ws, realm, next_timestamp): - block_id = BlockID.new() - - # User not part of the realm - rep = await block_create(bob_ws, block_id, realm, BLOCK_DATA, check_rep=False) - assert isinstance(rep, BlockCreateRepNotAllowed) - - # User part of the realm with various role - for role, access_granted in [ - (RealmRole.READER, False), - (RealmRole.CONTRIBUTOR, True), - (RealmRole.MANAGER, True), - (RealmRole.OWNER, True), - ]: - await backend.realm.update_roles( - alice.organization_id, - RealmGrantedRole( - certificate=b"", - realm_id=realm, - user_id=bob.user_id, - role=role, - granted_by=alice.device_id, - granted_on=next_timestamp(), - ), - ) - block_id = BlockID.new() - rep = await block_create(bob_ws, block_id, realm, BLOCK_DATA, check_rep=False) - if access_granted: - assert isinstance(rep, BlockCreateRepOk) - - else: - assert isinstance(rep, BlockCreateRepNotAllowed) - - # Ensure user that used to be part of the realm have no longer access - await backend.realm.update_roles( - alice.organization_id, - RealmGrantedRole( - certificate=b"", - realm_id=realm, - user_id=bob.user_id, - role=None, - granted_by=alice.device_id, - granted_on=next_timestamp(), - ), - ) - rep = await block_create(bob_ws, block_id, realm, BLOCK_DATA, check_rep=False) - assert isinstance(rep, BlockCreateRepNotAllowed) - - -@pytest.mark.trio -async def test_block_create_and_read(alice_ws, realm): - await block_create(alice_ws, BLOCK_ID, realm, BLOCK_DATA) - - rep = await block_read(alice_ws, BLOCK_ID) - assert rep == BlockReadRepOk(BLOCK_DATA) - - # Test not found as well - - dummy_id = BlockID.from_hex("00000000000000000000000000000002") - rep = await block_read(alice_ws, dummy_id) - assert isinstance(rep, BlockReadRepNotFound) - - -@pytest.mark.trio -@customize_fixtures(blockstore_mode="RAID1") -async def test_raid1_block_create_and_read(alice_ws, realm): - await test_block_create_and_read(alice_ws, realm) - - -@pytest.mark.trio -@customize_fixtures(blockstore_mode="RAID1") -async def test_raid1_block_create_partial_failure(caplog, alice_ws, backend, realm): - async def mock_create(organization_id, id, block): - 
await trio.sleep(0) - raise BlockStoreError() - - backend.blockstore.blockstores[1].create = mock_create - - rep = await block_create(alice_ws, BLOCK_ID, realm, BLOCK_DATA, check_rep=False) - assert isinstance(rep, BlockCreateRepTimeout) - - log = caplog.assert_occurred_once("[warning ] Block create error: A node have failed") - assert f"organization_id=CoolOrg" in log - assert f"block_id={BLOCK_ID.hex}" in log - - -@pytest.mark.trio -@customize_fixtures(blockstore_mode="RAID1_PARTIAL_CREATE_OK") -async def test_raid1_partial_create_ok_block_create_partial_failure(alice_ws, backend, realm): - async def mock_create(organization_id, id, block): - await trio.sleep(0) - raise BlockStoreError() - - backend.blockstore.blockstores[1].create = mock_create - - rep = await block_create(alice_ws, BLOCK_ID, realm, BLOCK_DATA) - assert isinstance(rep, BlockCreateRepOk) - - rep = await block_read(alice_ws, BLOCK_ID) - assert rep == BlockReadRepOk(BLOCK_DATA) - - -@pytest.mark.trio -@customize_fixtures(blockstore_mode="RAID1") -async def test_raid1_block_create_partial_exists(alice_ws, alice, backend, realm): - await backend.blockstore.blockstores[1].create(alice.organization_id, BLOCK_ID, BLOCK_DATA) - # Blockstore overwrite existing block without questions - await block_create(alice_ws, BLOCK_ID, realm, BLOCK_DATA) - - -@pytest.mark.trio -@customize_fixtures(blockstore_mode="RAID1") -async def test_raid1_block_read_partial_failure(alice_ws, backend, block): - async def mock_read(organization_id, id): - await trio.sleep(0) - raise BlockStoreError() - - backend.blockstore.blockstores[1].read = mock_read - - rep = await block_read(alice_ws, block) - assert rep == BlockReadRepOk(BLOCK_DATA) - - -@pytest.mark.trio -@customize_fixtures(blockstore_mode="RAID0") -async def test_raid0_block_create_and_read(alice_ws, realm): - await test_block_create_and_read(alice_ws, realm) - - -@pytest.mark.trio -@customize_fixtures(blockstore_mode="RAID5") -async def test_raid5_block_create_and_read(alice_ws, realm): - await test_block_create_and_read(alice_ws, realm) - - -@pytest.mark.trio -@customize_fixtures(blockstore_mode="RAID5") -@pytest.mark.parametrize("failing_blockstore", (0, 1, 2)) -async def test_raid5_block_create_single_failure( - caplog, alice_ws, backend, realm, failing_blockstore -): - async def mock_create(organization_id, id, block): - await trio.sleep(0) - raise BlockStoreError() - - backend.blockstore.blockstores[failing_blockstore].create = mock_create - - rep = await block_create(alice_ws, BLOCK_ID, realm, BLOCK_DATA, check_rep=False) - assert isinstance(rep, BlockCreateRepTimeout) - - log = caplog.assert_occurred_once("[warning ] Block create error: A node have failed") - assert f"organization_id=CoolOrg" in log - assert f"block_id={BLOCK_ID.hex}" in log - - -@pytest.mark.trio -@customize_fixtures(blockstore_mode="RAID5_PARTIAL_CREATE_OK") -@pytest.mark.parametrize("failing_blockstore", (0, 1, 2)) -async def test_raid5_partial_create_ok_block_create_single_failure( - caplog, alice_ws, backend, realm, failing_blockstore -): - async def mock_create(organization_id, id, block): - await trio.sleep(0) - raise BlockStoreError() - - backend.blockstore.blockstores[failing_blockstore].create = mock_create - - await block_create(alice_ws, BLOCK_ID, realm, BLOCK_DATA) - - rep = await block_read(alice_ws, BLOCK_ID) - assert rep == BlockReadRepOk(BLOCK_DATA) - - -@pytest.mark.trio -@customize_fixtures(blockstore_mode="RAID5_PARTIAL_CREATE_OK") -@pytest.mark.parametrize("failing_blockstores", [(0, 1), (0, 2)]) 
-async def test_raid5_partial_create_ok_block_create_too_many_failures( - caplog, alice_ws, backend, realm, failing_blockstores -): - async def mock_create(organization_id, id, block): - await trio.sleep(0) - raise BlockStoreError() - - fb1, fb2 = failing_blockstores - - backend.blockstore.blockstores[fb1].create = mock_create - backend.blockstore.blockstores[fb2].create = mock_create - - rep = await block_create(alice_ws, BLOCK_ID, realm, BLOCK_DATA, check_rep=False) - assert isinstance(rep, BlockCreateRepTimeout) - - log = caplog.assert_occurred_once( - "[warning ] Block create error: More than 1 nodes have failed" - ) - assert f"organization_id=CoolOrg" in log - assert f"block_id={BLOCK_ID.hex}" in log - - -@pytest.mark.trio -@customize_fixtures(blockstore_mode="RAID5") -async def test_raid5_block_create_partial_exists(alice_ws, alice, backend, realm): - await backend.blockstore.blockstores[1].create(alice.organization_id, BLOCK_ID, BLOCK_DATA) - - await block_create(alice_ws, BLOCK_ID, realm, BLOCK_DATA) - - -@pytest.mark.trio -@customize_fixtures(blockstore_mode="RAID5") -@pytest.mark.parametrize("failing_blockstore", (0, 1)) # Ignore checksum blockstore -async def test_raid5_block_read_single_failure(alice_ws, backend, block, failing_blockstore): - async def mock_read(organization_id, id): - await trio.sleep(0) - raise BlockStoreError() - - backend.blockstore.blockstores[failing_blockstore].read = mock_read - - rep = await block_read(alice_ws, block) - assert rep == BlockReadRepOk(BLOCK_DATA) - - -@pytest.mark.trio -@customize_fixtures(blockstore_mode="RAID5") -@pytest.mark.parametrize("bad_chunk", (b"", b"too big")) -async def test_raid5_block_read_single_invalid_chunk_size( - alice_ws, alice, backend, block, bad_chunk -): - async def mock_read(organization_id, id): - return bad_chunk - - backend.blockstore.blockstores[1].read = mock_read - - rep = await block_read(alice_ws, block) - # A bad chunk result in a bad block, which should be detected by the client - assert isinstance(rep, BlockReadRepOk) - - -@pytest.mark.trio -@customize_fixtures(blockstore_mode="RAID5") -@pytest.mark.parametrize("failing_blockstores", [(0, 1), (0, 2)]) -async def test_raid5_block_read_multiple_failure( - caplog, alice_ws, backend, block, failing_blockstores -): - async def mock_read(organization_id, id): - await trio.sleep(0) - raise BlockStoreError() - - fb1, fb2 = failing_blockstores - backend.blockstore.blockstores[fb1].read = mock_read - backend.blockstore.blockstores[fb2].read = mock_read - - rep = await block_read(alice_ws, block) - assert isinstance(rep, BlockReadRepTimeout) - - log = caplog.assert_occurred_once("[warning ] Block read error: More than 1 nodes have failed") - assert f"organization_id=CoolOrg" in log - assert f"block_id={block.hex}" in log - - -@pytest.mark.parametrize( - "bad_msg", - [ - {}, - {"id": BLOCK_ID.hex, "block": BLOCK_DATA, "bad_field": "foo"}, - {"id": "not an uuid", "block": BLOCK_DATA}, - {"id": 42, "block": BLOCK_DATA}, - {"id": None, "block": BLOCK_DATA}, - {"id": BLOCK_ID.hex, "block": 42}, - {"id": BLOCK_ID.hex, "block": None}, - {"block": BLOCK_DATA}, - ], -) -@pytest.mark.trio -async def test_block_create_bad_msg(alice_ws, bad_msg): - await alice_ws.send(msgpack.packb({"cmd": "block_create", **bad_msg})) - raw_rep = await alice_ws.receive() - rep = authenticated_cmds.latest.block_create.Rep.load(raw_rep) - assert isinstance(rep, authenticated_cmds.latest.block_create.RepUnknownStatus) - assert rep.status == "invalid_msg_format" - - -@pytest.mark.trio -async def 
test_block_read_not_found(alice_ws): - rep = await block_read(alice_ws, BLOCK_ID) - assert isinstance(rep, BlockReadRepNotFound) - - -@pytest.mark.parametrize( - "bad_msg", - [ - {"id": BLOCK_ID.hex, "bad_field": "foo"}, - {"id": "not_an_uuid"}, - {"id": 42}, - {"id": None}, - {}, - ], -) -@pytest.mark.trio -async def test_block_read_bad_msg(alice_ws, bad_msg): - await alice_ws.send(msgpack.packb({"cmd": "block_read", **bad_msg})) - raw_rep = await alice_ws.receive() - # A valid ID wouldn't exist in the database anyway, but that's fine: here we - # test another layer, so all that matters is that we get our - # `invalid_msg_format` status - rep = authenticated_cmds.latest.block_read.Rep.load(raw_rep) - assert isinstance(rep, authenticated_cmds.latest.block_read.RepUnknownStatus) - assert rep.status == "invalid_msg_format" - - -@pytest.mark.trio -async def test_block_conflicting_id(alice_ws, realm): - block_v1 = b"v1" - await block_create(alice_ws, BLOCK_ID, realm, block_v1) - - block_v2 = b"v2" - rep = await block_create(alice_ws, BLOCK_ID, realm, block_v2, check_rep=False) - assert isinstance(rep, BlockCreateRepAlreadyExists) - - rep = await block_read(alice_ws, BLOCK_ID) - assert rep == BlockReadRepOk(block_v1) - - -@pytest.mark.trio -async def test_block_check_other_organization( - backend_asgi_app, ws_from_other_organization_factory, realm, block -): - async with ws_from_other_organization_factory(backend_asgi_app) as sock: - rep = await block_read(sock, block) - assert isinstance(rep, BlockReadRepNotFound) - - await backend_asgi_app.backend.realm.create( - sock.device.organization_id, - RealmGrantedRole( - certificate=b"", - realm_id=realm, - user_id=sock.device.user_id, - role=RealmRole.OWNER, - granted_by=sock.device.device_id, - granted_on=DateTime.now(), - ), - ) - await block_create(sock, block, realm, b"other org data") - - rep = await block_read(sock, block) - assert rep == BlockReadRepOk(b"other org data") - - -@pytest.mark.trio -async def test_access_during_maintenance(backend, alice, alice_ws, realm, block): - await backend.realm.start_reencryption_maintenance( - alice.organization_id, - alice.device_id, - realm, - 2, - {alice.user_id: b"whatever"}, - DateTime.now(), - ) - rep = await block_create(alice_ws, BLOCK_ID, realm, BLOCK_DATA, check_rep=False) - assert isinstance(rep, BlockCreateRepInMaintenance) - - # Reading while in reencryption is OK - rep = await block_read(alice_ws, block) - assert rep == BlockReadRepOk(BLOCK_DATA) - - -@given(block=st.binary(max_size=2**8), nb_blockstores=st.integers(min_value=3, max_value=16)) -def test_split_block(block, nb_blockstores): - nb_chunks = nb_blockstores - 1 - chunks = split_block_in_chunks(block, nb_chunks) - assert len(chunks) == nb_chunks - - chunk_size = len(chunks[0]) - for chunk in chunks[1:]: - assert len(chunk) == chunk_size - - rebuilt = rebuild_block_from_chunks(chunks, None) - assert rebuilt == block - - checksum_chunk = generate_checksum_chunk(chunks) - for missing in range(len(chunks)): - partial_chunks = chunks.copy() - partial_chunks[missing] = None - rebuilt = rebuild_block_from_chunks(partial_chunks, checksum_chunk) - assert rebuilt == block diff --git a/server/tests/backend/realm/test_create.py b/server/tests/backend/realm/test_create.py deleted file mode 100644 index 6afe8414ea6..00000000000 --- a/server/tests/backend/realm/test_create.py +++ /dev/null @@ -1,154 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -import pytest - -from 
parsec._parsec import ( - BackendEventRealmRolesUpdated, - DateTime, -) -from parsec.api.data import RealmRoleCertificate -from parsec.api.protocol import ( - RealmCreateRepAlreadyExists, - RealmCreateRepBadTimestamp, - RealmCreateRepInvalidCertification, - RealmCreateRepInvalidData, - RealmCreateRepOk, - RealmRole, - UserProfile, - VlobID, -) -from parsec.utils import BALLPARK_CLIENT_EARLY_OFFSET, BALLPARK_CLIENT_LATE_OFFSET -from tests.backend.common import realm_create -from tests.common import customize_fixtures, freeze_time - - -async def _test_create_ok(backend, device, ws): - realm_id = VlobID.from_hex("C0000000000000000000000000000000") - certif = RealmRoleCertificate.build_realm_root_certif( - author=device.device_id, timestamp=DateTime.now(), realm_id=realm_id - ).dump_and_sign(device.signing_key) - with backend.event_bus.listen() as spy: - rep = await realm_create(ws, certif) - assert isinstance(rep, RealmCreateRepOk) - await spy.wait_with_timeout(BackendEventRealmRolesUpdated) - - -@pytest.mark.trio -async def test_create_ok(backend, alice, alice_ws): - await _test_create_ok(backend, alice, alice_ws) - - -@pytest.mark.trio -@customize_fixtures(alice_profile=UserProfile.OUTSIDER) -async def test_create_allowed_for_outsider(backend, alice, alice_ws): - await _test_create_ok(backend, alice, alice_ws) - - -@pytest.mark.trio -async def test_create_invalid_certif(bob, alice_ws): - realm_id = VlobID.from_hex("C0000000000000000000000000000000") - certif = RealmRoleCertificate.build_realm_root_certif( - author=bob.device_id, timestamp=DateTime.now(), realm_id=realm_id - ).dump_and_sign(bob.signing_key) - rep = await realm_create(alice_ws, certif) - # The reason is no longer generated - assert isinstance(rep, RealmCreateRepInvalidCertification) - - -@pytest.mark.trio -async def test_create_certif_not_self_signed(alice, bob, alice_ws): - realm_id = VlobID.from_hex("C0000000000000000000000000000000") - certif = RealmRoleCertificate( - author=alice.device_id, - timestamp=DateTime.now(), - realm_id=realm_id, - user_id=bob.user_id, - role=RealmRole.OWNER, - ).dump_and_sign(alice.signing_key) - rep = await realm_create(alice_ws, certif) - # The reason is no longer generated - assert isinstance(rep, RealmCreateRepInvalidData) - - -@pytest.mark.trio -async def test_create_certif_role_not_owner(alice, alice_ws): - realm_id = VlobID.from_hex("C0000000000000000000000000000000") - certif = RealmRoleCertificate( - author=alice.device_id, - timestamp=DateTime.now(), - realm_id=realm_id, - user_id=alice.user_id, - role=RealmRole.MANAGER, - ).dump_and_sign(alice.signing_key) - rep = await realm_create(alice_ws, certif) - # The reason is no longer generated - assert isinstance(rep, RealmCreateRepInvalidData) - - -@pytest.mark.trio -async def test_create_certif_too_old(alice, alice_ws): - now = DateTime.now() - - # Generate a certificate - - realm_id = VlobID.from_hex("C0000000000000000000000000000000") - certif = RealmRoleCertificate.build_realm_root_certif( - author=alice.device_id, timestamp=now, realm_id=realm_id - ).dump_and_sign(alice.signing_key) - - # Create a realm a tiny bit too late - - later = now.add(seconds=BALLPARK_CLIENT_LATE_OFFSET) - with freeze_time(later): - rep = await realm_create(alice_ws, certif) - assert rep == RealmCreateRepBadTimestamp( - reason=None, - backend_timestamp=later, - ballpark_client_early_offset=BALLPARK_CLIENT_EARLY_OFFSET, - ballpark_client_late_offset=BALLPARK_CLIENT_LATE_OFFSET, - client_timestamp=now, - ) - - # Create a realm late but right before the deadline 
- - later = now.add(seconds=BALLPARK_CLIENT_LATE_OFFSET, microseconds=-1) - with freeze_time(later): - rep = await realm_create(alice_ws, certif) - assert isinstance(rep, RealmCreateRepOk) - - # Generate a new certificate - - realm_id = VlobID.from_hex("C0000000000000000000000000000001") - certif = RealmRoleCertificate.build_realm_root_certif( - author=alice.device_id, timestamp=now, realm_id=realm_id - ).dump_and_sign(alice.signing_key) - - # Create a realm a tiny bit too soon - - sooner = now.subtract(seconds=BALLPARK_CLIENT_EARLY_OFFSET) - with freeze_time(sooner): - rep = await realm_create(alice_ws, certif) - assert rep == RealmCreateRepBadTimestamp( - reason=None, - backend_timestamp=sooner, - ballpark_client_early_offset=BALLPARK_CLIENT_EARLY_OFFSET, - ballpark_client_late_offset=BALLPARK_CLIENT_LATE_OFFSET, - client_timestamp=now, - ) - - # Create a realm early but just within the limit - - sooner = now.subtract(seconds=BALLPARK_CLIENT_EARLY_OFFSET, microseconds=-1) - with freeze_time(sooner): - rep = await realm_create(alice_ws, certif) - assert isinstance(rep, RealmCreateRepOk) - - -@pytest.mark.trio -async def test_create_realm_already_exists(alice, alice_ws, realm): - certif = RealmRoleCertificate.build_realm_root_certif( - author=alice.device_id, timestamp=DateTime.now(), realm_id=realm - ).dump_and_sign(alice.signing_key) - rep = await realm_create(alice_ws, certif) - assert isinstance(rep, RealmCreateRepAlreadyExists) diff --git a/server/tests/backend/realm/test_realm_stats.py b/server/tests/backend/realm/test_realm_stats.py deleted file mode 100644 index 8a26a1a7713..00000000000 --- a/server/tests/backend/realm/test_realm_stats.py +++ /dev/null @@ -1,52 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -import pytest - -from parsec.api.protocol import ( - BlockID, - RealmStatsRepNotAllowed, - RealmStatsRepNotFound, - RealmStatsRepOk, - VlobID, -) -from tests.backend.common import block_create, realm_stats, vlob_create - -REALM_ID_FAKE = VlobID.from_hex("00000000-0000-0000-0000-000000000001") - - -@pytest.mark.trio -async def test_realm_stats_ok(alice_ws, realm): - # Create new data - await block_create(alice_ws, realm_id=realm, block_id=BlockID.new(), block=b"1234") - rep = await realm_stats(alice_ws, realm_id=realm) - assert rep == RealmStatsRepOk(blocks_size=4, vlobs_size=0) - - # Create new metadata - await vlob_create(alice_ws, realm_id=realm, vlob_id=VlobID.new(), blob=b"1234") - rep = await realm_stats(alice_ws, realm_id=realm) - assert rep == RealmStatsRepOk(blocks_size=4, vlobs_size=4) - - -@pytest.mark.trio -async def test_realm_stats_ko( - backend_asgi_app, alice_ws, bob_ws, ws_from_other_organization_factory, realm -): - # test with no access to the realm - rep = await realm_stats(bob_ws, realm_id=realm) - assert isinstance(rep, RealmStatsRepNotAllowed) - - # test with a non-existent realm - rep = await realm_stats(alice_ws, realm_id=REALM_ID_FAKE) - # The reason is no longer generated - assert isinstance(rep, RealmStatsRepNotFound) - - # test with no access to the realm - rep = await realm_stats(bob_ws, realm_id=realm) - assert isinstance(rep, RealmStatsRepNotAllowed) - - # test with device_id but wrong organization - async with ws_from_other_organization_factory(backend_asgi_app) as sock: - rep = await realm_stats(sock, realm_id=realm) - # The reason is no longer generated - assert isinstance(rep, RealmStatsRepNotFound) diff --git 
a/server/tests/backend/realm/test_reencryption_maintenance.py b/server/tests/backend/realm/test_reencryption_maintenance.py deleted file mode 100644 index 0b222acc6df..00000000000 --- a/server/tests/backend/realm/test_reencryption_maintenance.py +++ /dev/null @@ -1,733 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -import pytest - -from parsec._parsec import ( - BackendEventMessageReceived, - BackendEventRealmMaintenanceFinished, - BackendEventRealmMaintenanceStarted, - DateTime, - ReencryptionBatchEntry, - authenticated_cmds, -) -from parsec.api.protocol import ( - # TODO: This test is broken but it is fine given reencryption is not part of APIv4 - # ApiV2V3_APIEventMessageReceived, - # ApiV2V3_APIEventRealmMaintenanceFinished, - # ApiV2V3_APIEventRealmMaintenanceStarted, - # ApiV2V3_EventsListenRepNoEvents, - # ApiV2V3_EventsListenRepOk, - # ApiV2V3_Message, - # ApiV2V3_MessageGetRepOk, - BlockCreateRepInMaintenance, - BlockID, - BlockReadRepOk, - MaintenanceType, - RealmFinishReencryptionMaintenanceRepBadEncryptionRevision, - RealmFinishReencryptionMaintenanceRepMaintenanceError, - RealmFinishReencryptionMaintenanceRepNotAllowed, - RealmFinishReencryptionMaintenanceRepNotInMaintenance, - RealmFinishReencryptionMaintenanceRepOk, - RealmRole, - RealmStartReencryptionMaintenanceRepBadEncryptionRevision, - RealmStartReencryptionMaintenanceRepBadTimestamp, - RealmStartReencryptionMaintenanceRepInMaintenance, - RealmStartReencryptionMaintenanceRepNotAllowed, - RealmStartReencryptionMaintenanceRepNotFound, - RealmStartReencryptionMaintenanceRepOk, - RealmStartReencryptionMaintenanceRepParticipantMismatch, - RealmStatusRepOk, - UserID, - VlobCreateRepInMaintenance, - VlobID, - VlobListVersionsRepOk, - VlobMaintenanceGetReencryptionBatchRepBadEncryptionRevision, - VlobMaintenanceGetReencryptionBatchRepNotInMaintenance, - VlobMaintenanceGetReencryptionBatchRepOk, - VlobMaintenanceSaveReencryptionBatchRepNotAllowed, - VlobMaintenanceSaveReencryptionBatchRepNotInMaintenance, - VlobMaintenanceSaveReencryptionBatchRepOk, - VlobPollChangesRepOk, - VlobUpdateRepInMaintenance, -) -from parsec.backend.realm import RealmGrantedRole -from parsec.backend.vlob import VlobNotFoundError, VlobVersionError -from parsec.utils import BALLPARK_CLIENT_EARLY_OFFSET, BALLPARK_CLIENT_LATE_OFFSET -from tests.backend.common import ( - # TODO: This test is broken but it is fine given reencryption is not part of APIv4 - # apiv2v3_events_listen_nowait, - # apiv2v3_events_subscribe, - # apiv2v3_message_get, - # apiv2v3_vlob_read, - block_create, - block_read, - realm_finish_reencryption_maintenance, - realm_start_reencryption_maintenance, - realm_status, - vlob_create, - vlob_list_versions, - vlob_maintenance_get_reencryption_batch, - vlob_maintenance_save_reencryption_batch, - vlob_poll_changes, - vlob_update, -) -from tests.common import freeze_time, real_clock_timeout - - -@pytest.mark.trio -async def test_start_bad_encryption_revision(alice_ws, realm, alice): - rep = await realm_start_reencryption_maintenance( - alice_ws, realm, 42, DateTime.now(), {alice.user_id: b"whatever"}, check_rep=False - ) - assert isinstance(rep, RealmStartReencryptionMaintenanceRepBadEncryptionRevision) - - -@pytest.mark.trio -async def test_start_bad_timestamp(alice_ws, realm, alice): - with freeze_time() as now: - rep = await realm_start_reencryption_maintenance( - alice_ws, realm, 2, DateTime(2000, 1, 1), {alice.user_id: b"whatever"}, check_rep=False - ) - 
assert rep == RealmStartReencryptionMaintenanceRepBadTimestamp( - reason=None, - backend_timestamp=now, - ballpark_client_early_offset=BALLPARK_CLIENT_EARLY_OFFSET, - ballpark_client_late_offset=BALLPARK_CLIENT_LATE_OFFSET, - client_timestamp=DateTime(2000, 1, 1), - ) - - -@pytest.mark.trio -async def test_start_bad_per_participant_message( - backend, alice_ws, alice, bob, adam, realm, next_timestamp -): - # Bob used to be part of the realm - await backend.realm.update_roles( - alice.organization_id, - RealmGrantedRole( - certificate=b"", - realm_id=realm, - user_id=bob.user_id, - role=RealmRole.READER, - granted_by=alice.device_id, - granted_on=next_timestamp(), - ), - ) - await backend.realm.update_roles( - alice.organization_id, - RealmGrantedRole( - certificate=b"", - realm_id=realm, - user_id=bob.user_id, - role=None, - granted_by=alice.device_id, - granted_on=next_timestamp(), - ), - ) - # Adam is still part of the realm, but is revoked - await backend.realm.update_roles( - alice.organization_id, - RealmGrantedRole( - certificate=b"", - realm_id=realm, - user_id=adam.user_id, - role=RealmRole.READER, - granted_by=alice.device_id, - granted_on=next_timestamp(), - ), - ) - await backend.user.revoke_user( - alice.organization_id, - adam.user_id, - revoked_user_certificate=b"", - revoked_user_certifier=alice.device_id, - ) - - for msg in [ - {}, - {alice.user_id: b"ok", bob.user_id: b"bad"}, - {alice.user_id: b"ok", UserID("zack"): b"bad"}, - {alice.user_id: b"ok", adam.user_id: b"bad"}, - ]: - rep = await realm_start_reencryption_maintenance( - alice_ws, realm, 2, next_timestamp(), msg, check_rep=False - ) - # The reason is no longer generated - assert isinstance(rep, RealmStartReencryptionMaintenanceRepParticipantMismatch) - - # Finally make sure the reencryption is possible - await realm_start_reencryption_maintenance( - alice_ws, realm, 2, next_timestamp(), {alice.user_id: b"ok"} - ) - - -@pytest.mark.trio -async def test_start_send_message_to_participants(backend, alice, bob, alice_ws, bob_ws, realm): - await backend.realm.update_roles( - alice.organization_id, - RealmGrantedRole( - certificate=b"", - realm_id=realm, - user_id=bob.user_id, - role=RealmRole.READER, - granted_by=alice.device_id, - granted_on=DateTime.now(), - ), - ) - - with freeze_time("2000-01-02"): - await realm_start_reencryption_maintenance( - alice_ws, - realm, - 2, - DateTime.now(), - {alice.user_id: b"alice msg", bob.user_id: b"bob msg"}, - ) - - # Each participant should have received a message - for user, sock in ((alice, alice_ws), (bob, bob_ws)): - rep = await apiv2v3_message_get(sock) - assert rep == ApiV2V3_MessageGetRepOk( - messages=[ - ApiV2V3_Message( - count=1, - body=f"{user.user_id.str} msg".encode(), - timestamp=DateTime(2000, 1, 2), - sender=alice.device_id, - ) - ], - ) - - -@pytest.mark.trio -async def test_start_reencryption_update_status(alice_ws, alice, realm): - with freeze_time("2000-01-02"): - await realm_start_reencryption_maintenance( - alice_ws, realm, 2, DateTime.now(), {alice.user_id: b"foo"} - ) - rep = await realm_status(alice_ws, realm) - assert rep == RealmStatusRepOk( - encryption_revision=2, - in_maintenance=True, - maintenance_started_by=alice.device_id, - maintenance_started_on=DateTime(2000, 1, 2), - maintenance_type=MaintenanceType.REENCRYPTION, - ) - - -@pytest.mark.trio -async def test_start_already_in_maintenance(alice_ws, realm, alice): - await realm_start_reencryption_maintenance( - alice_ws, realm, 2, DateTime.now(), {alice.user_id: b"whatever"} - ) - # Providing 
good or bad encryption revision shouldn't change anything - for encryption_revision in (2, 3): - rep = await realm_start_reencryption_maintenance( - alice_ws, - realm, - encryption_revision, - DateTime.now(), - {alice.user_id: b"whatever"}, - check_rep=False, - ) - assert isinstance(rep, RealmStartReencryptionMaintenanceRepInMaintenance) - - -@pytest.mark.trio -async def test_start_check_access_rights(backend, bob_ws, alice, bob, realm, next_timestamp): - # User not part of the realm - rep = await realm_start_reencryption_maintenance( - bob_ws, realm, 2, DateTime.now(), {alice.user_id: b"whatever"}, check_rep=False - ) - assert isinstance(rep, RealmStartReencryptionMaintenanceRepNotAllowed) - - # User part of the realm with various role - for not_allowed_role in (RealmRole.READER, RealmRole.CONTRIBUTOR, RealmRole.MANAGER, None): - await backend.realm.update_roles( - alice.organization_id, - RealmGrantedRole( - certificate=b"", - realm_id=realm, - user_id=bob.user_id, - role=not_allowed_role, - granted_by=alice.device_id, - granted_on=next_timestamp(), - ), - ) - - rep = await realm_start_reencryption_maintenance( - bob_ws, - realm, - 2, - next_timestamp(), - {alice.user_id: b"foo", bob.user_id: b"bar"}, - check_rep=False, - ) - assert isinstance(rep, RealmStartReencryptionMaintenanceRepNotAllowed) - - # Finally, just make sure owner can do it - await backend.realm.update_roles( - alice.organization_id, - RealmGrantedRole( - certificate=b"", - realm_id=realm, - user_id=bob.user_id, - role=RealmRole.OWNER, - granted_by=alice.device_id, - granted_on=next_timestamp(), - ), - ) - - rep = await realm_start_reencryption_maintenance( - bob_ws, - realm, - 2, - DateTime.now(), - {alice.user_id: b"foo", bob.user_id: b"bar"}, - check_rep=False, - ) - assert isinstance(rep, RealmStartReencryptionMaintenanceRepOk) - - -@pytest.mark.trio -async def test_start_other_organization( - backend_asgi_app, ws_from_other_organization_factory, realm, alice -): - async with ws_from_other_organization_factory(backend_asgi_app) as sock: - rep = await realm_start_reencryption_maintenance( - sock, realm, 2, DateTime.now(), {alice.user_id: b"foo"}, check_rep=False - ) - # The reason is no longer generated - assert isinstance(rep, RealmStartReencryptionMaintenanceRepNotFound) - - -@pytest.mark.trio -async def test_finish_not_in_maintenance(alice_ws, realm): - for encryption_revision in (2, 3): - rep = await realm_finish_reencryption_maintenance( - alice_ws, realm, encryption_revision, check_rep=False - ) - # The reason is no longer generated - assert isinstance(rep, RealmFinishReencryptionMaintenanceRepNotInMaintenance) - - -@pytest.mark.trio -async def test_finish_while_reencryption_not_done(alice_ws, realm, alice, vlobs): - await realm_start_reencryption_maintenance( - alice_ws, realm, 2, DateTime.now(), {alice.user_id: b"whatever"} - ) - rep = await realm_finish_reencryption_maintenance(alice_ws, realm, 2, check_rep=False) - # The reason is no longer generated - assert isinstance(rep, RealmFinishReencryptionMaintenanceRepMaintenanceError) - - # Also try with part of the job done - rep = await vlob_maintenance_get_reencryption_batch(alice_ws, realm, 2, size=2) - assert isinstance(rep, VlobMaintenanceGetReencryptionBatchRepOk) - assert len(rep.batch) == 2 - - batch = [] - for entry in rep.batch: - batch.append( - ReencryptionBatchEntry( - entry.vlob_id, - entry.version, - f"{entry.vlob_id.hex}::{entry.version} reencrypted".encode(), - ) - ) - await vlob_maintenance_save_reencryption_batch(alice_ws, realm, 2, batch) - - 
rep = await realm_finish_reencryption_maintenance(alice_ws, realm, 2, check_rep=False) - # The reason is no longer generated - assert isinstance(rep, RealmFinishReencryptionMaintenanceRepMaintenanceError) - - -@pytest.mark.trio -async def test_reencrypt_and_finish_check_access_rights( - backend, alice_ws, bob_ws, alice, bob, realm, vlobs, next_timestamp -): - encryption_revision = 1 - - # Changing realm roles is not possible during maintenance, - # hence those helpers to easily jump in/out of maintenance - - async def _ready_to_finish(bob_in_workspace): - nonlocal encryption_revision - encryption_revision += 1 - reencryption_msgs = {alice.user_id: b"foo"} - if bob_in_workspace: - reencryption_msgs[bob.user_id] = b"bar" - await realm_start_reencryption_maintenance( - alice_ws, realm, encryption_revision, DateTime.now(), reencryption_msgs - ) - updated_batch = [ - ReencryptionBatchEntry( - vlob_id=vlob_id, - version=version, - blob=f"{vlob_id.hex}::{version}::{encryption_revision}".encode(), - ) - for vlob_id, version in {(vlobs[0], 1), (vlobs[0], 2), (vlobs[1], 1)} - ] - await vlob_maintenance_save_reencryption_batch( - alice_ws, realm, encryption_revision, updated_batch - ) - - async def _finish(): - await realm_finish_reencryption_maintenance(alice_ws, realm, encryption_revision) - - async def _assert_bob_maintenance_access(allowed): - rep = await vlob_maintenance_save_reencryption_batch( - bob_ws, realm, encryption_revision, [], check_rep=False - ) - if allowed: - assert isinstance(rep, VlobMaintenanceSaveReencryptionBatchRepOk) - else: - assert isinstance(rep, VlobMaintenanceSaveReencryptionBatchRepNotAllowed) - - rep = await realm_finish_reencryption_maintenance( - bob_ws, realm, encryption_revision, check_rep=False - ) - if allowed: - assert isinstance(rep, RealmFinishReencryptionMaintenanceRepOk) - else: - assert isinstance(rep, RealmFinishReencryptionMaintenanceRepNotAllowed) - - # User not part of the realm - await _ready_to_finish(bob_in_workspace=False) - await _assert_bob_maintenance_access(allowed=False) - await _finish() - - # User part of the realm with various role - for not_allowed_role in (RealmRole.READER, RealmRole.CONTRIBUTOR, RealmRole.MANAGER, None): - await backend.realm.update_roles( - alice.organization_id, - RealmGrantedRole( - certificate=b"", - realm_id=realm, - user_id=bob.user_id, - role=not_allowed_role, - granted_by=alice.device_id, - granted_on=next_timestamp(), - ), - ) - await _ready_to_finish(bob_in_workspace=not_allowed_role is not None) - await _assert_bob_maintenance_access(allowed=False) - await _finish() - - # Finally, just make sure owner can do it - await backend.realm.update_roles( - alice.organization_id, - RealmGrantedRole( - certificate=b"", - realm_id=realm, - user_id=bob.user_id, - role=RealmRole.OWNER, - granted_by=alice.device_id, - granted_on=next_timestamp(), - ), - ) - await _ready_to_finish(bob_in_workspace=True) - await _assert_bob_maintenance_access(allowed=True) - - -@pytest.mark.trio -async def test_reencryption_batch_not_during_maintenance(alice_ws, realm): - rep = await vlob_maintenance_get_reencryption_batch(alice_ws, realm, 1) - # The reason is no longer generated - assert isinstance(rep, VlobMaintenanceGetReencryptionBatchRepNotInMaintenance) - - rep = await vlob_maintenance_save_reencryption_batch(alice_ws, realm, 1, [], check_rep=False) - # The reason is no longer generated - assert isinstance(rep, VlobMaintenanceSaveReencryptionBatchRepNotInMaintenance) - - rep = await realm_finish_reencryption_maintenance(alice_ws, realm, 
1, check_rep=False) - # The reason is no longer generated - assert isinstance(rep, RealmFinishReencryptionMaintenanceRepNotInMaintenance) - - -@pytest.mark.trio -async def test_reencryption_batch_bad_revision(alice_ws, realm, alice): - await realm_start_reencryption_maintenance( - alice_ws, realm, 2, DateTime.now(), {alice.user_id: b"foo"} - ) - - rep = await vlob_maintenance_get_reencryption_batch(alice_ws, realm, 1) - assert isinstance(rep, VlobMaintenanceGetReencryptionBatchRepBadEncryptionRevision) - - rep = await realm_finish_reencryption_maintenance(alice_ws, realm, 1, check_rep=False) - assert isinstance(rep, RealmFinishReencryptionMaintenanceRepBadEncryptionRevision) - - -@pytest.mark.trio -async def test_reencryption(alice, alice_ws, realm, vlob_atoms): - with freeze_time("2000-01-02"): - await realm_start_reencryption_maintenance( - alice_ws, realm, 2, DateTime.now(), {alice.user_id: b"foo"} - ) - - # Each participant should have received a message - rep = await apiv2v3_message_get(alice_ws) - assert rep == ApiV2V3_MessageGetRepOk( - messages=[ - ApiV2V3_Message( - count=1, - body=b"foo", - timestamp=DateTime(2000, 1, 2), - sender=alice.device_id, - ) - ], - ) - - async def _reencrypt_with_batch_of_2(expected_size, expected_done): - rep = await vlob_maintenance_get_reencryption_batch(alice_ws, realm, 2, size=2) - assert isinstance(rep, VlobMaintenanceGetReencryptionBatchRepOk) - assert len(rep.batch) == expected_size - - batch = [] - for entry in rep.batch: - batch.append( - ReencryptionBatchEntry( - entry.vlob_id, - entry.version, - f"{entry.vlob_id.hex}::{entry.version} reencrypted".encode(), - ) - ) - rep = await vlob_maintenance_save_reencryption_batch(alice_ws, realm, 2, batch) - assert rep == VlobMaintenanceSaveReencryptionBatchRepOk(total=3, done=expected_done) - - # Should take 2 batches to reencrypt everything - await _reencrypt_with_batch_of_2(expected_size=2, expected_done=2) - await _reencrypt_with_batch_of_2(expected_size=1, expected_done=3) - await _reencrypt_with_batch_of_2(expected_size=0, expected_done=3) - - # Finish the reencryption - await realm_finish_reencryption_maintenance(alice_ws, realm, 2) - - # Check the vlobs have changed - for vlob_id, version in vlob_atoms: - rep = await apiv2v3_vlob_read(alice_ws, vlob_id, version, encryption_revision=2) - assert rep.blob == f"{vlob_id.hex}::{version} reencrypted".encode() - - -@pytest.mark.trio -async def test_reencryption_provide_unknown_vlob_atom_and_duplications( - backend, alice, alice_ws, realm, vlob_atoms -): - await realm_start_reencryption_maintenance( - alice_ws, realm, 2, DateTime.now(), {alice.user_id: b"foo"} - ) - rep = await vlob_maintenance_get_reencryption_batch(alice_ws, realm, 2) - assert isinstance(rep, VlobMaintenanceGetReencryptionBatchRepOk) - assert len(rep.batch) == 3 - - unknown_vlob_id = VlobID.new() - duplicated_vlob_id = rep.batch[0].vlob_id - duplicated_version = rep.batch[0].version - duplicated_expected_blob = rep.batch[0].blob - reencrypted_batch = [ - # Reencryption as identity - *rep.batch, - # Add an unknown vlob - ReencryptionBatchEntry(unknown_vlob_id, 1, b"ignored"), - # Valid vlob ID with invalid version - ReencryptionBatchEntry(duplicated_vlob_id, 99, b"ignored"), - # Duplicate a vlob atom, should be ignored given the reencryption has already been done for it - ReencryptionBatchEntry(duplicated_vlob_id, duplicated_version, b"ignored"), - ] - - # Another level of duplication! - for i in range(2): - rep = await vlob_maintenance_save_reencryption_batch(alice_ws, realm, 2, reencrypted_batch) - assert rep == VlobMaintenanceSaveReencryptionBatchRepOk(total=3, done=3) - - # Finish the reencryption - await realm_finish_reencryption_maintenance(alice_ws, realm, 2) - - # Check the vlobs - with pytest.raises(VlobNotFoundError): - await backend.vlob.read( - organization_id=alice.organization_id, - author=alice.device_id, - encryption_revision=2, - vlob_id=unknown_vlob_id, - ) - with pytest.raises(VlobVersionError): - await backend.vlob.read( - organization_id=alice.organization_id, - author=alice.device_id, - encryption_revision=2, - vlob_id=duplicated_vlob_id, - version=99, - ) - _, content, _, _, _, _ = await backend.vlob.read( - organization_id=alice.organization_id, - author=alice.device_id, - encryption_revision=2, - vlob_id=duplicated_vlob_id, - version=duplicated_version, - ) - assert content == duplicated_expected_blob - - -@pytest.mark.trio -async def test_access_during_reencryption(backend, alice_ws, alice, realm_factory, next_timestamp): - # First initialize a nice realm with block and vlob - realm_id = await realm_factory(backend, author=alice) - vlob_id = VlobID.new() - block_id = BlockID.new() - await backend.vlob.create( - organization_id=alice.organization_id, - author=alice.device_id, - realm_id=realm_id, - encryption_revision=1, - vlob_id=vlob_id, - timestamp=next_timestamp(), - blob=b"v1", - ) - await backend.block.create( - organization_id=alice.organization_id, - author=alice.device_id, - realm_id=realm_id, - block_id=block_id, - created_on=next_timestamp(), - block=b"", - ) - - async def _assert_write_access_disallowed(encryption_revision): - rep = await vlob_create( - alice_ws, - realm_id=realm_id, - vlob_id=VlobID.new(), - blob=b"data", - encryption_revision=encryption_revision, - check_rep=False, - ) - assert isinstance(rep, VlobCreateRepInMaintenance) - rep = await vlob_update( - alice_ws, - vlob_id, - version=2, - blob=b"data", - encryption_revision=encryption_revision, - check_rep=False, - ) - assert isinstance(rep, VlobUpdateRepInMaintenance) - rep = await block_create( - alice_ws, block_id=block_id, realm_id=realm_id, block=b"data", check_rep=False - ) - assert isinstance(rep, BlockCreateRepInMaintenance) - - async def _assert_read_access_allowed(encryption_revision, expected_blob=b"v1"): - rep = await apiv2v3_vlob_read( - alice_ws, vlob_id=vlob_id, version=1, encryption_revision=encryption_revision - ) - assert isinstance(rep, authenticated_cmds.v3.vlob_read.RepOk) - assert rep.blob == expected_blob - - rep = await block_read(alice_ws, block_id=block_id) - assert rep == BlockReadRepOk(b"") - - # For good measure, also try those read-only commands even if they - # are encryption-revision agnostic - rep = await vlob_list_versions(alice_ws, vlob_id=vlob_id) - assert isinstance(rep, VlobListVersionsRepOk) - rep = await vlob_poll_changes(alice_ws, realm_id=realm_id, last_checkpoint=0) - assert isinstance(rep, VlobPollChangesRepOk) - - async def _assert_read_access_bad_encryption_revision(encryption_revision, expected_status): - rep = await apiv2v3_vlob_read( - alice_ws, vlob_id=vlob_id, version=1, encryption_revision=encryption_revision - ) - assert isinstance(rep, expected_status) - - # Sanity check just to make sure we can access the data with the initial encryption revision - await _assert_read_access_allowed(1) - - # Now start reencryption - await backend.realm.start_reencryption_maintenance( - organization_id=alice.organization_id, -
author=alice.device_id, - realm_id=realm_id, - encryption_revision=2, - per_participant_message={alice.user_id: b""}, - timestamp=DateTime.now(), - ) - - # Only read with old encryption revision is now allowed - await _assert_read_access_allowed(1) - await _assert_read_access_bad_encryption_revision( - 2, expected_status=authenticated_cmds.v3.vlob_read.RepInMaintenance - ) - await _assert_write_access_disallowed(1) - await _assert_write_access_disallowed(2) - - # Actually reencrypt the vlob data, this shouldn't affect us for the moment - # given reencryption is not formally finished - await backend.vlob.maintenance_save_reencryption_batch( - organization_id=alice.organization_id, - author=alice.device_id, - realm_id=realm_id, - encryption_revision=2, - batch=[(vlob_id, 1, b"v2")], - ) - - await _assert_read_access_allowed(1) - await _assert_read_access_bad_encryption_revision( - 2, expected_status=authenticated_cmds.v3.vlob_read.RepInMaintenance - ) - await _assert_write_access_disallowed(1) - await _assert_write_access_disallowed(2) - - # Finish the reencryption - await backend.realm.finish_reencryption_maintenance( - organization_id=alice.organization_id, - author=alice.device_id, - realm_id=realm_id, - encryption_revision=2, - ) - - # Now only the new encryption revision is allowed - await _assert_read_access_allowed(2, expected_blob=b"v2") - await _assert_read_access_bad_encryption_revision( - 1, expected_status=authenticated_cmds.v3.vlob_read.RepBadEncryptionRevision - ) - - -@pytest.mark.trio -async def test_reencryption_events(backend, alice_ws, alice2_ws, realm, alice, vlobs, vlob_atoms): - # Start listening events - await apiv2v3_events_subscribe(alice_ws) - - with backend.event_bus.listen() as spy: - # Start maintenance and check for events - await realm_start_reencryption_maintenance( - alice2_ws, realm, 2, DateTime.now(), {alice.user_id: b"foo"} - ) - - async with real_clock_timeout(): - # No guarantees those events occur before the commands' return - await spy.wait_multiple( - [BackendEventRealmMaintenanceStarted, BackendEventMessageReceived] - ) - - rep = await apiv2v3_events_listen_nowait(alice_ws) - assert rep == ApiV2V3_EventsListenRepOk(ApiV2V3_APIEventRealmMaintenanceStarted(realm, 2)) - rep = await apiv2v3_events_listen_nowait(alice_ws) - assert rep == ApiV2V3_EventsListenRepOk(ApiV2V3_APIEventMessageReceived(1)) - - # Do the reencryption - rep = await vlob_maintenance_get_reencryption_batch(alice_ws, realm, 2, size=100) - await vlob_maintenance_save_reencryption_batch(alice_ws, realm, 2, rep.batch) - - # Finish maintenance and check for events - await realm_finish_reencryption_maintenance(alice2_ws, realm, 2) - - # No guarantees those events occur before the commands' return - await spy.wait_with_timeout(BackendEventRealmMaintenanceFinished) - - rep = await apiv2v3_events_listen_nowait(alice_ws) - assert rep == ApiV2V3_EventsListenRepOk(ApiV2V3_APIEventRealmMaintenanceFinished(realm, 2)) - - # Sanity check - rep = await apiv2v3_events_listen_nowait(alice_ws) - assert rep == ApiV2V3_EventsListenRepNoEvents() diff --git a/server/tests/backend/realm/test_roles_updated_event.py b/server/tests/backend/realm/test_roles_updated_event.py deleted file mode 100644 index fc90aaaba87..00000000000 --- a/server/tests/backend/realm/test_roles_updated_event.py +++ /dev/null @@ -1,85 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -import pytest - -from parsec._parsec import ( - 
BackendEventRealmRolesUpdated, - DateTime, - authenticated_cmds, -) -from parsec.api.data import RealmRoleCertificate -from parsec.api.protocol import ( - RealmRole, - VlobID, -) -from tests.backend.common import ( - events_listen, - realm_create, - realm_update_roles, -) - - -@pytest.mark.trio -async def test_realm_create(backend, alice, alice_ws, alice2_ws): - realm_id = VlobID.from_hex("C0000000000000000000000000000000") - certif = RealmRoleCertificate.build_realm_root_certif( - author=alice.device_id, timestamp=DateTime.now(), realm_id=realm_id - ).dump_and_sign(alice.signing_key) - - async with events_listen(alice2_ws) as alice2_events_listen: - with backend.event_bus.listen() as spy: - rep = await realm_create(alice_ws, certif) - assert isinstance(rep, authenticated_cmds.latest.realm_create.RepOk) - certificate_index = rep.certificate_index - await spy.wait_with_timeout(BackendEventRealmRolesUpdated) - - rep = await alice2_events_listen.do_recv() - assert rep == authenticated_cmds.latest.events_listen.RepOk( - authenticated_cmds.latest.events_listen.APIEventCertificatesUpdated(certificate_index) - ) - - -@pytest.mark.trio -async def test_roles_updated_for_participant( - backend, alice, bob, alice_ws, bob_ws, realm, next_timestamp -): - async def _update_role_and_check_events(role): - with backend.event_bus.listen() as spy: - certif = RealmRoleCertificate( - author=alice.device_id, - timestamp=next_timestamp(), - realm_id=realm, - user_id=bob.user_id, - role=role, - ).dump_and_sign(alice.signing_key) - rep = await realm_update_roles(alice_ws, certif, check_rep=False) - assert isinstance(rep, authenticated_cmds.latest.realm_update_roles.RepOk) - - await spy.wait_with_timeout( - BackendEventRealmRolesUpdated( - organization_id=alice.organization_id, - author=alice.device_id, - realm_id=realm, - user=bob.user_id, - role=role, - ) - ) - - # Check events propagated to the client - rep = await apiv2v3_events_listen_nowait(bob_ws) - assert rep == ApiV2V3_EventsListenRepOk(ApiV2V3_APIEventRealmRolesUpdated(realm, role)) - rep = await apiv2v3_events_listen_nowait(bob_ws) - assert isinstance(rep, ApiV2V3_EventsListenRepNoEvents) - - # 0) Init event listening on the socket - await apiv2v3_events_subscribe(bob_ws) - - # 1) New participant - await _update_role_and_check_events(RealmRole.MANAGER) - - # 2) Change participant role - await _update_role_and_check_events(RealmRole.READER) - - # 3) Stop sharing with participant - await _update_role_and_check_events(None) diff --git a/server/tests/backend/realm/test_shuffle_roles.py b/server/tests/backend/realm/test_shuffle_roles.py deleted file mode 100644 index 674e4936296..00000000000 --- a/server/tests/backend/realm/test_shuffle_roles.py +++ /dev/null @@ -1,173 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -import pytest -import trio -from hypothesis import strategies as st -from hypothesis_trio.stateful import ( - Bundle, - TrioAsyncioRuleBasedStateMachine, - initialize, - invariant, - multiple, - rule, - run_state_machine_as_test, -) - -from parsec.api.data import RealmRoleCertificate -from parsec.api.protocol import ( - RealmRole, - RealmUpdateRolesRepAlreadyGranted, - RealmUpdateRolesRepInvalidData, - RealmUpdateRolesRepNotAllowed, - RealmUpdateRolesRepOk, - VlobID, -) -from parsec.backend.asgi import app_factory -from parsec.backend.realm import RealmGrantedRole -from tests.backend.common import realm_update_roles -from tests.common import call_with_control - - 
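# Aside: test_shuffle_roles below is a property-based state machine test built
# on hypothesis' rule-based stateful testing. As a bare-bones illustration of
# the same machinery (a synchronous, hypothetical example with invented names,
# independent of the parsec fixtures): a Bundle collects generated values for
# reuse by later rules, each @rule step mutates the model, and every
# @invariant is re-checked after each step.

from hypothesis import strategies as st
from hypothesis.stateful import Bundle, RuleBasedStateMachine, invariant, rule

ROLES = ("OWNER", "MANAGER", "CONTRIBUTOR", "READER", None)

class RoleModel(RuleBasedStateMachine):
    users = Bundle("users")

    def __init__(self):
        super().__init__()
        self.roles = {}

    @rule(target=users, name=st.integers())
    def new_user(self, name):
        # Returned values are fed into the `users` bundle for later rules.
        self.roles.setdefault(name, None)
        return name

    @rule(user=users, role=st.sampled_from(ROLES))
    def set_role(self, user, role):
        self.roles[user] = role

    @invariant()
    def roles_stay_valid(self):
        # Checked after every step, mirroring check_current_roles below.
        assert all(role in ROLES for role in self.roles.values())

# unittest/pytest entry point generated by hypothesis:
TestRoleModel = RoleModel.TestCase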
-@pytest.mark.slow -def test_shuffle_roles( - hypothesis_settings, - reset_testbed, - backend_factory, - backend_data_binder_factory, - backend_authenticated_ws_factory, - local_device_factory, - coolorg, - next_timestamp, -): - class ShuffleRoles(TrioAsyncioRuleBasedStateMachine): - realm_role_strategy = st.one_of(st.just(x) for x in RealmRole.VALUES) - User = Bundle("user") - - async def start_backend(self): - async def _backend_controlled_cb(started_cb): - async with backend_factory(populated=False) as backend: - await started_cb(backend=backend) - - return await self.get_root_nursery().start(call_with_control, _backend_controlled_cb) - - @property - def backend_asgi_app(self): - return self._backend_asgi_app - - @initialize(target=User) - async def init(self): - await reset_testbed() - self.backend_controller = await self.start_backend() - self._backend_asgi_app = app_factory(self.backend_controller.backend) - self.org = coolorg - device = local_device_factory(org=self.org) - - # Create organization and first user - self.backend_data_binder = backend_data_binder_factory(self.backend_asgi_app.backend) - await self.backend_data_binder.bind_organization(self.org, device) - - # Create realm - self.realm_id = VlobID.new() - now = next_timestamp() - certif = RealmRoleCertificate.build_realm_root_certif( - author=device.device_id, timestamp=now, realm_id=self.realm_id - ).dump_and_sign(device.signing_key) - await self.backend_asgi_app.backend.realm.create( - organization_id=device.organization_id, - self_granted_role=RealmGrantedRole( - realm_id=self.realm_id, - user_id=device.user_id, - certificate=certif, - role=RealmRole.OWNER, - granted_by=device.device_id, - granted_on=now, - ), - ) - self.current_roles = {device.user_id: RealmRole.OWNER} - self.certifs = [certif] - - self.wss = {} - return device - - async def get_ws(self, device): - try: - return self.wss[device.user_id] - except KeyError: - pass - - async def _start_ws(device, *, task_status=trio.TASK_STATUS_IGNORED): - async with backend_authenticated_ws_factory(self.backend_asgi_app, device) as ws: - task_status.started(ws) - await trio.sleep_forever() - - ws = await self.get_root_nursery().start(_start_ws, device) - self.wss[device.user_id] = ws - return ws - - @rule(target=User, author=User, role=realm_role_strategy) - async def give_role_to_new_user(self, author, role): - # Create new user/device - new_device = local_device_factory(org=self.org) - await self.backend_data_binder.bind_device(new_device) - self.current_roles[new_device.user_id] = None - # Assign role - author_ws = await self.get_ws(author) - if await self._give_role(author_ws, author, new_device, role): - return new_device - else: - return multiple() - - @rule(author=User, recipient=User, role=realm_role_strategy) - async def change_role_for_existing_user(self, author, recipient, role): - author_ws = await self.get_ws(author) - await self._give_role(author_ws, author, recipient, role) - - async def _give_role(self, author_ws, author, recipient, role): - author_ws = await self.get_ws(author) - - certif = RealmRoleCertificate( - author=author.device_id, - timestamp=next_timestamp(), - realm_id=self.realm_id, - user_id=recipient.user_id, - role=role, - ).dump_and_sign(author.signing_key) - rep = await realm_update_roles(author_ws, certif, check_rep=False) - if author.user_id == recipient.user_id: - # The reason is no longer generated - assert isinstance(rep, RealmUpdateRolesRepInvalidData) - - else: - owner_only = (RealmRole.OWNER,) - owner_or_manager = 
(RealmRole.OWNER, RealmRole.MANAGER) - existing_recipient_role = self.current_roles[recipient.user_id] - if existing_recipient_role in owner_or_manager or role in owner_or_manager: - allowed_roles = owner_only - else: - allowed_roles = owner_or_manager - - if self.current_roles[author.user_id] in allowed_roles: - # print(f"+ {author.user_id} -{role.value}-> {recipient.user_id}") - if existing_recipient_role != role: - assert isinstance(rep, RealmUpdateRolesRepOk) - self.current_roles[recipient.user_id] = role - self.certifs.append(certif) - else: - assert isinstance(rep, RealmUpdateRolesRepAlreadyGranted) - else: - # print(f"- {author.user_id} -{role.value}-> {recipient.user_id}") - assert isinstance(rep, RealmUpdateRolesRepNotAllowed) - - return isinstance(rep, RealmUpdateRolesRepOk) - - @invariant() - async def check_current_roles(self): - try: - backend = self.backend_asgi_app.backend - except AttributeError: - return - roles = await backend.realm.get_current_roles(self.org.organization_id, self.realm_id) - assert roles == {k: v for k, v in self.current_roles.items() if v is not None} - - run_state_machine_as_test(ShuffleRoles, settings=hypothesis_settings) diff --git a/server/tests/backend/realm/test_update_roles.py b/server/tests/backend/realm/test_update_roles.py deleted file mode 100644 index d7957382240..00000000000 --- a/server/tests/backend/realm/test_update_roles.py +++ /dev/null @@ -1,476 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -import pytest - -from parsec._parsec import ( - DateTime, -) -from parsec.api.data import RealmRoleCertificate -from parsec.api.protocol import ( - RealmRole, - RealmUpdateRolesRepAlreadyGranted, - RealmUpdateRolesRepIncompatibleProfile, - RealmUpdateRolesRepInMaintenance, - RealmUpdateRolesRepInvalidData, - RealmUpdateRolesRepNotAllowed, - RealmUpdateRolesRepNotFound, - RealmUpdateRolesRepOk, - RealmUpdateRolesRepRequireGreaterTimestamp, - RealmUpdateRolesRepUserRevoked, - UserProfile, - VlobCreateRepOk, - VlobID, -) -from parsec.backend.realm import RealmGrantedRole -from tests.backend.common import realm_update_roles, vlob_create -from tests.common import customize_fixtures, freeze_time - -VLOB_ID = VlobID.from_hex("00000000000000000000000000000001") -REALM_ID = VlobID.from_hex("0000000000000000000000000000000A") - - -async def _realm_get_clear_role_certifs(backend, device, realm_id): - certificates = await backend.realm.get_role_certificates( - organization_id=device.organization_id, author=device.device_id, realm_id=realm_id - ) - cooked = [RealmRoleCertificate.unsecure_load(certif) for certif in certificates] - return [item for item in sorted(cooked, key=lambda x: x.timestamp)] - - -@pytest.fixture -def realm_generate_certif_and_update_roles_or_fail(next_timestamp): - async def _realm_generate_certif_and_update_roles_or_fail( - ws, author, realm_id, user_id, role, timestamp=None - ): - certif = RealmRoleCertificate( - author=author.device_id, - timestamp=timestamp or next_timestamp(), - realm_id=realm_id, - user_id=user_id, - role=role, - ).dump_and_sign(author.signing_key) - return await realm_update_roles(ws, certif, check_rep=False) - - return _realm_generate_certif_and_update_roles_or_fail - - -@pytest.fixture -def backend_realm_generate_certif_and_update_roles(next_timestamp): - async def _backend_realm_generate_certif_and_update_roles( - backend, author, realm_id, user_id, role, timestamp=None - ): - now = timestamp or next_timestamp() - certif = 
RealmRoleCertificate( - author=author.device_id, timestamp=now, realm_id=realm_id, user_id=user_id, role=role - ).dump_and_sign(author.signing_key) - await backend.realm.update_roles( - author.organization_id, - RealmGrantedRole( - certificate=certif, - realm_id=realm_id, - user_id=user_id, - role=role, - granted_by=author.device_id, - granted_on=now, - ), - ) - return certif - - return _backend_realm_generate_certif_and_update_roles - - -@pytest.mark.trio -async def test_update_roles_not_found( - alice, bob, alice_ws, realm_generate_certif_and_update_roles_or_fail -): - rep = await realm_generate_certif_and_update_roles_or_fail( - alice_ws, alice, REALM_ID, bob.user_id, RealmRole.MANAGER - ) - # The reason is no longer generated - assert isinstance(rep, RealmUpdateRolesRepNotFound) - - -@pytest.mark.trio -async def test_update_roles_bad_user( - alice, mallory, alice_ws, realm, realm_generate_certif_and_update_roles_or_fail -): - rep = await realm_generate_certif_and_update_roles_or_fail( - alice_ws, alice, realm, mallory.user_id, RealmRole.MANAGER - ) - # The reason is no longer generated - assert isinstance(rep, RealmUpdateRolesRepNotFound) - - -@pytest.mark.trio -async def test_update_roles_cannot_modify_self( - alice, alice_ws, realm, realm_generate_certif_and_update_roles_or_fail -): - rep = await realm_generate_certif_and_update_roles_or_fail( - alice_ws, alice, realm, alice.user_id, RealmRole.MANAGER - ) - # The reason is no longer generated - assert isinstance(rep, RealmUpdateRolesRepInvalidData) - - -@pytest.mark.trio -@customize_fixtures(bob_profile=UserProfile.OUTSIDER) -async def test_update_roles_outsider_is_limited( - alice, bob, alice_ws, realm, realm_generate_certif_and_update_roles_or_fail -): - for role, is_allowed in [ - (RealmRole.READER, True), - (RealmRole.CONTRIBUTOR, True), - (RealmRole.MANAGER, False), - (RealmRole.OWNER, False), - ]: - rep = await realm_generate_certif_and_update_roles_or_fail( - alice_ws, alice, realm, bob.user_id, role - ) - if is_allowed: - assert isinstance(rep, RealmUpdateRolesRepOk) - else: - # The reason is no longer generated - assert isinstance(rep, RealmUpdateRolesRepIncompatibleProfile) - - -@pytest.mark.trio -@customize_fixtures(alice_profile=UserProfile.OUTSIDER) -async def test_update_roles_outsider_cannot_share_with( - alice, bob, alice_ws, realm, realm_generate_certif_and_update_roles_or_fail -): - rep = await realm_generate_certif_and_update_roles_or_fail( - alice_ws, alice, realm, bob.user_id, RealmRole.READER - ) - # The reason is no longer generated - assert isinstance(rep, RealmUpdateRolesRepNotAllowed) - - -@pytest.mark.trio -@pytest.mark.parametrize("start_with_existing_role", (False, True)) -async def test_remove_role_idempotent( - backend, - alice, - bob, - alice_ws, - realm, - start_with_existing_role, - realm_generate_certif_and_update_roles_or_fail, -): - if start_with_existing_role: - with freeze_time("2000-01-03"): - rep = await realm_generate_certif_and_update_roles_or_fail( - alice_ws, alice, realm, bob.user_id, RealmRole.MANAGER - ) - assert isinstance(rep, RealmUpdateRolesRepOk) - - with freeze_time("2000-01-04"): - rep = await realm_generate_certif_and_update_roles_or_fail( - alice_ws, alice, realm, bob.user_id, None - ) - if start_with_existing_role: - assert isinstance(rep, RealmUpdateRolesRepOk) - else: - assert isinstance(rep, RealmUpdateRolesRepAlreadyGranted) - - with freeze_time("2000-01-05"): - rep = await realm_generate_certif_and_update_roles_or_fail( - alice_ws, alice, realm, bob.user_id, None - ) - 
assert isinstance(rep, RealmUpdateRolesRepAlreadyGranted) - - certifs = await _realm_get_clear_role_certifs(backend, alice, realm) - expected_certifs = [ - RealmRoleCertificate( - author=alice.device_id, - timestamp=DateTime(2000, 1, 2), - realm_id=realm, - user_id=alice.user_id, - role=RealmRole.OWNER, - ) - ] - if start_with_existing_role: - expected_certifs += [ - RealmRoleCertificate( - author=alice.device_id, - timestamp=DateTime(2000, 1, 3), - realm_id=realm, - user_id=bob.user_id, - role=RealmRole.MANAGER, - ), - RealmRoleCertificate( - author=alice.device_id, - timestamp=DateTime(2000, 1, 4), - realm_id=realm, - user_id=bob.user_id, - role=None, - ), - ] - assert certifs == expected_certifs - - -@pytest.mark.trio -async def test_update_roles_as_owner( - backend, alice, bob, alice_ws, realm, realm_generate_certif_and_update_roles_or_fail -): - for role in RealmRole.VALUES: - rep = await realm_generate_certif_and_update_roles_or_fail( - alice_ws, alice, realm, bob.user_id, role - ) - assert isinstance(rep, RealmUpdateRolesRepOk) - - roles = await backend.realm.get_current_roles(alice.organization_id, realm) - assert roles == {alice.user_id: RealmRole.OWNER, bob.user_id: role} - - # Now remove role - rep = await realm_generate_certif_and_update_roles_or_fail( - alice_ws, alice, realm, bob.user_id, None - ) - assert isinstance(rep, RealmUpdateRolesRepOk) - - roles = await backend.realm.get_current_roles(bob.organization_id, realm) - assert roles == {alice.user_id: RealmRole.OWNER} - - -@pytest.mark.trio -async def test_update_roles_as_manager( - backend_data_binder, - local_device_factory, - backend, - alice, - bob, - alice_ws, - bob_ws, - realm, - realm_generate_certif_and_update_roles_or_fail, - backend_realm_generate_certif_and_update_roles, -): - # Vlob realm must have at least one owner, so we need 3 users in total - # (Zack is owner, Alice is manager and gives role to Bob) - zack = local_device_factory("zack@dev1") - await backend_data_binder.bind_device(zack) - await backend_realm_generate_certif_and_update_roles( - backend, alice, realm, zack.user_id, RealmRole.OWNER - ) - await backend_realm_generate_certif_and_update_roles( - backend, zack, realm, alice.user_id, RealmRole.MANAGER - ) - - for role in (RealmRole.CONTRIBUTOR, RealmRole.READER): - rep = await realm_generate_certif_and_update_roles_or_fail( - alice_ws, alice, realm, bob.user_id, role - ) - assert isinstance(rep, RealmUpdateRolesRepOk) - - roles = await backend.realm.get_current_roles(alice.organization_id, realm) - assert roles == { - zack.user_id: RealmRole.OWNER, - alice.user_id: RealmRole.MANAGER, - bob.user_id: role, - } - - # Remove role - rep = await realm_generate_certif_and_update_roles_or_fail( - alice_ws, alice, realm, bob.user_id, None - ) - assert isinstance(rep, RealmUpdateRolesRepOk) - - roles = await backend.realm.get_current_roles(alice.organization_id, realm) - assert roles == { - zack.user_id: RealmRole.OWNER, - alice.user_id: RealmRole.MANAGER, - } - - # Cannot give owner or manager role as manager - for new_role in (RealmRole.OWNER, RealmRole.MANAGER): - rep = await realm_generate_certif_and_update_roles_or_fail( - alice_ws, alice, realm, bob.user_id, new_role - ) - assert isinstance(rep, RealmUpdateRolesRepNotAllowed) - - # Also cannot change owner or manager role - for new_role in (RealmRole.OWNER, RealmRole.MANAGER): - await backend_realm_generate_certif_and_update_roles( - backend, zack, realm, bob.user_id, new_role - ) - rep = await realm_generate_certif_and_update_roles_or_fail( - 
alice_ws, alice, realm, zack.user_id, RealmRole.CONTRIBUTOR - ) - assert isinstance(rep, RealmUpdateRolesRepNotAllowed) - - -@pytest.mark.trio -@pytest.mark.parametrize("alice_role", (RealmRole.CONTRIBUTOR, RealmRole.READER, None)) -async def test_role_update_not_allowed( - backend_data_binder, - local_device_factory, - backend, - alice, - bob, - alice_ws, - realm, - alice_role, - realm_generate_certif_and_update_roles_or_fail, - backend_realm_generate_certif_and_update_roles, -): - # Vlob realm must have at least one owner, so we need 3 users in total - # (Zack is owner, Alice gives role to Bob) - zack = local_device_factory("zack@dev1") - await backend_data_binder.bind_device(zack) - await backend_realm_generate_certif_and_update_roles( - backend, alice, realm, zack.user_id, RealmRole.OWNER - ) - await backend_realm_generate_certif_and_update_roles( - backend, zack, realm, alice.user_id, alice_role - ) - - # Cannot give role - for role in RealmRole.VALUES: - rep = await realm_generate_certif_and_update_roles_or_fail( - alice_ws, alice, realm, bob.user_id, role - ) - assert isinstance(rep, RealmUpdateRolesRepNotAllowed) - - # Cannot remove role - await backend_realm_generate_certif_and_update_roles( - backend, zack, realm, bob.user_id, RealmRole.READER - ) - rep = await realm_generate_certif_and_update_roles_or_fail( - alice_ws, alice, realm, bob.user_id, None - ) - assert isinstance(rep, RealmUpdateRolesRepNotAllowed) - - -@pytest.mark.trio -async def test_remove_role_dont_change_other_realms( - backend, alice, bob, alice_ws, realm, bob_realm, realm_generate_certif_and_update_roles_or_fail -): - # Bob is owner of bob_realm and manager of realm - rep = await realm_generate_certif_and_update_roles_or_fail( - alice_ws, alice, realm, bob.user_id, RealmRole.MANAGER - ) - assert isinstance(rep, RealmUpdateRolesRepOk) - - # Remove Bob from realm - rep = await realm_generate_certif_and_update_roles_or_fail( - alice_ws, alice, realm, bob.user_id, None - ) - assert isinstance(rep, RealmUpdateRolesRepOk) - - # Bob should still have access to bob_realm - roles = await backend.realm.get_current_roles(alice.organization_id, bob_realm) - assert roles == {bob.user_id: RealmRole.OWNER} - - -@pytest.mark.trio -async def test_role_access_during_maintenance( - backend, alice, bob, alice_ws, realm, realm_generate_certif_and_update_roles_or_fail -): - await backend.realm.start_reencryption_maintenance( - alice.organization_id, - alice.device_id, - realm, - 2, - {alice.user_id: b"whatever"}, - DateTime(2000, 1, 2), - ) - - # Update role is not allowed - rep = await realm_generate_certif_and_update_roles_or_fail( - alice_ws, alice, realm, bob.user_id, RealmRole.MANAGER - ) - assert isinstance(rep, RealmUpdateRolesRepInMaintenance) - - -@pytest.mark.trio -async def test_update_roles_causality_checks( - backend, - alice, - bob, - adam, - alice_ws, - bob_ws, - realm, - realm_generate_certif_and_update_roles_or_fail, - next_timestamp, -): - # Use this timestamp as reference - ref = next_timestamp() - - # Grant a role to bob - rep = await realm_generate_certif_and_update_roles_or_fail( - alice_ws, alice, realm, bob.user_id, RealmRole.MANAGER, ref - ) - assert isinstance(rep, RealmUpdateRolesRepOk) - - # Now try to change bob's role with the same timestamp or lower, this should fail - for timestamp in (ref, ref.subtract(seconds=1)): - rep = await realm_generate_certif_and_update_roles_or_fail( - alice_ws, alice, realm, bob.user_id, RealmRole.CONTRIBUTOR, timestamp - ) - assert rep == 
RealmUpdateRolesRepRequireGreaterTimestamp(ref) - - # Advance ref - ref = ref.add(seconds=10) - - # Now bob invites someone at timestamp ref - rep = await realm_generate_certif_and_update_roles_or_fail( - bob_ws, bob, realm, adam.user_id, RealmRole.CONTRIBUTOR, ref - ) - assert isinstance(rep, RealmUpdateRolesRepOk) - - # Now try to remove bob's management rights with the same timestamp or lower: this should fail - for timestamp in (ref, ref.subtract(seconds=1)): - rep = await realm_generate_certif_and_update_roles_or_fail( - alice_ws, alice, realm, bob.user_id, RealmRole.CONTRIBUTOR, timestamp - ) - assert rep == RealmUpdateRolesRepRequireGreaterTimestamp(ref) - - # Advance ref - ref = ref.add(seconds=10) - - # Now bob writes to the corresponding realm - rep = await vlob_create( - bob_ws, realm, VLOB_ID, blob=b"ciphered", timestamp=ref, check_rep=False - ) - assert isinstance(rep, VlobCreateRepOk) - - # Now try to remove bob's write rights with the same timestamp or lower: this should fail - for timestamp in (ref, ref.subtract(seconds=1)): - rep = await realm_generate_certif_and_update_roles_or_fail( - alice_ws, alice, realm, bob.user_id, RealmRole.READER, timestamp - ) - assert rep == RealmUpdateRolesRepRequireGreaterTimestamp(ref) - - -@pytest.mark.trio -async def test_update_roles_for_revoked_user( - backend, - alice, - bob, - alice_ws, - realm, - realm_generate_certif_and_update_roles_or_fail, - next_timestamp, - backend_data_binder, -): - # Grant a role to bob - rep = await realm_generate_certif_and_update_roles_or_fail( - alice_ws, alice, realm, bob.user_id, RealmRole.MANAGER, next_timestamp() - ) - assert isinstance(rep, RealmUpdateRolesRepOk) - - # Revoke Bob - await backend_data_binder.bind_revocation(bob.user_id, certifier=alice) - - # Now try to change bob's role, this should fail - rep = await realm_generate_certif_and_update_roles_or_fail( - alice_ws, alice, realm, bob.user_id, RealmRole.CONTRIBUTOR, next_timestamp() - ) - assert isinstance(rep, RealmUpdateRolesRepUserRevoked) - - # Even removing access should fail - rep = await realm_generate_certif_and_update_roles_or_fail( - alice_ws, alice, realm, bob.user_id, None, next_timestamp() - ) - assert isinstance(rep, RealmUpdateRolesRepUserRevoked) diff --git a/server/tests/backend/realm/test_vlob_access.py b/server/tests/backend/realm/test_vlob_access.py deleted file mode 100644 index d5e53237ae7..00000000000 --- a/server/tests/backend/realm/test_vlob_access.py +++ /dev/null @@ -1,658 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -import pytest - -from parsec._parsec import ( - DateTime, - authenticated_cmds, -) -from parsec.api.protocol import ( - RealmRole, - RealmUpdateRolesRepOk, - VlobCreateRepAlreadyExists, - VlobCreateRepBadEncryptionRevision, - VlobCreateRepBadTimestamp, - VlobCreateRepInMaintenance, - VlobCreateRepNotAllowed, - VlobCreateRepOk, - VlobCreateRepRequireGreaterTimestamp, - VlobID, - VlobListVersionsRepNotAllowed, - VlobListVersionsRepNotFound, - VlobListVersionsRepOk, - VlobUpdateRepBadEncryptionRevision, - VlobUpdateRepBadTimestamp, - VlobUpdateRepBadVersion, - VlobUpdateRepInMaintenance, - VlobUpdateRepNotAllowed, - VlobUpdateRepNotFound, - VlobUpdateRepOk, - VlobUpdateRepRequireGreaterTimestamp, - packb, -) -from parsec.backend.realm import RealmGrantedRole -from parsec.utils import BALLPARK_CLIENT_EARLY_OFFSET, BALLPARK_CLIENT_LATE_OFFSET -from tests.backend.common import vlob_create, vlob_list_versions, vlob_read, 
vlob_update -from tests.backend.realm.test_update_roles import realm_generate_certif_and_update_roles_or_fail -from tests.common import freeze_time - -# Fixture -realm_generate_certif_and_update_roles_or_fail - -VLOB_ID = VlobID.from_hex("00000000000000000000000000000001") - - -@pytest.mark.trio -async def test_create_and_read(alice, alice_ws, alice2_ws, realm): - blob = b"Initial commit." - with freeze_time("2000-01-03"): - await vlob_create(alice_ws, realm, VLOB_ID, blob) - - rep = await vlob_read(alice2_ws, VLOB_ID) - assert rep == authenticated_cmds.latest.vlob_read.RepOk( - version=1, - blob=blob, - author=alice.device_id, - timestamp=DateTime(2000, 1, 3), - author_last_role_granted_on=DateTime(2000, 1, 2), - ) - - -@pytest.mark.trio -async def test_create_bad_timestamp(alice_ws, realm): - blob = b"Initial commit." - d1 = DateTime(2000, 1, 1) - with freeze_time(d1): - d2 = d1.add(seconds=3600) - rep = await vlob_create(alice_ws, realm, VLOB_ID, blob, timestamp=d2, check_rep=False) - assert rep == VlobCreateRepBadTimestamp( - reason=None, - backend_timestamp=d1, - ballpark_client_early_offset=BALLPARK_CLIENT_EARLY_OFFSET, - ballpark_client_late_offset=BALLPARK_CLIENT_LATE_OFFSET, - client_timestamp=d2, - ) - - -@pytest.mark.parametrize( - "bad_msg", - [ - {"blob": b"...", "bad_field": "foo"}, - {"blob": 42}, - {"blob": None}, - {"id": "", "blob": b"..."}, - {"id": 42, "blob": b"..."}, - ], -) -@pytest.mark.trio -async def test_create_bad_msg(alice_ws, bad_msg): - await alice_ws.send(packb({"cmd": "vlob_create", **bad_msg})) - raw_rep = await alice_ws.receive() - rep = authenticated_cmds.latest.vlob_create.Rep.load(raw_rep) - assert isinstance(rep, authenticated_cmds.latest.vlob_create.RepUnknownStatus) - assert rep.status == "invalid_msg_format" - - -@pytest.mark.trio -async def test_create_but_already_exists(alice_ws, realm): - blob = b"Initial commit." - - await vlob_create(alice_ws, realm, VLOB_ID, blob) - - rep = await vlob_create(alice_ws, realm, VLOB_ID, blob, check_rep=False) - assert isinstance(rep, VlobCreateRepAlreadyExists) - - -@pytest.mark.trio -async def test_create_but_unknown_realm(alice_ws): - bad_realm_id = VlobID.new() - blob = b"Initial commit." 
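The `VlobCreateRepBadTimestamp` reply above exposes the ballpark window the server enforces on client-supplied timestamps. A minimal sketch of that acceptance check, assuming the 300 s / 320 s offsets asserted later in these tests; the orientation of the two bounds here is an illustration, the real constants are `BALLPARK_CLIENT_EARLY_OFFSET` and `BALLPARK_CLIENT_LATE_OFFSET` from `parsec.utils`:

```python
from datetime import datetime, timedelta, timezone

# Illustrative values matching the 300.0 / 320.0 offsets asserted in the
# sequester bootstrap test further down; the real constants live in parsec.utils
EARLY_OFFSET = timedelta(seconds=300)
LATE_OFFSET = timedelta(seconds=320)

def timestamp_in_ballpark(client_ts: datetime, backend_ts: datetime) -> bool:
    # The exact orientation of the asymmetric bounds is an assumption here;
    # the point is that client clocks may only drift a few minutes either way
    return backend_ts - EARLY_OFFSET <= client_ts <= backend_ts + LATE_OFFSET

d1 = datetime(2000, 1, 1, tzinfo=timezone.utc)
d2 = d1 + timedelta(seconds=3600)
assert not timestamp_in_ballpark(d2, d1)  # one hour of drift: bad_timestamp
assert timestamp_in_ballpark(d1 + timedelta(seconds=30), d1)
```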
-
-    rep = await vlob_create(alice_ws, bad_realm_id, VLOB_ID, blob, check_rep=False)
-    assert isinstance(rep, VlobCreateRepNotAllowed)
-
-
-@pytest.mark.trio
-async def test_create_check_access_rights(backend, alice, bob, bob_ws, realm, next_timestamp):
-    vlob_id = VlobID.new()
-
-    # User not part of the realm
-    rep = await vlob_create(
-        bob_ws, realm, vlob_id, b"Initial version.", next_timestamp(), check_rep=False
-    )
-    assert isinstance(rep, VlobCreateRepNotAllowed)
-
-    # User part of the realm with various roles
-    for role, access_granted in [
-        (RealmRole.READER, False),
-        (RealmRole.CONTRIBUTOR, True),
-        (RealmRole.MANAGER, True),
-        (RealmRole.OWNER, True),
-    ]:
-        await backend.realm.update_roles(
-            alice.organization_id,
-            RealmGrantedRole(
-                certificate=b"dummy",
-                realm_id=realm,
-                user_id=bob.user_id,
-                role=role,
-                granted_by=alice.device_id,
-                granted_on=next_timestamp(),
-            ),
-        )
-        vlob_id = VlobID.new()
-        rep = await vlob_create(
-            bob_ws, realm, vlob_id, b"Initial version.", next_timestamp(), check_rep=False
-        )
-        if access_granted:
-            assert isinstance(rep, VlobCreateRepOk)
-        else:
-            assert isinstance(rep, VlobCreateRepNotAllowed)
-
-    # Ensure a user that used to be part of the realm no longer has access
-    await backend.realm.update_roles(
-        alice.organization_id,
-        RealmGrantedRole(
-            certificate=b"",
-            realm_id=realm,
-            user_id=bob.user_id,
-            role=None,
-            granted_by=alice.device_id,
-            granted_on=next_timestamp(),
-        ),
-    )
-    rep = await vlob_create(
-        bob_ws, realm, vlob_id, b"Initial version.", next_timestamp(), check_rep=False
-    )
-    assert isinstance(rep, VlobCreateRepNotAllowed)
-
-
-@pytest.mark.trio
-async def test_read_not_found(alice_ws):
-    rep = await vlob_read(alice_ws, VLOB_ID)
-    # The reason is no longer generated
-    assert isinstance(rep, authenticated_cmds.latest.vlob_read.RepNotFound)
-
-
-@pytest.mark.trio
-async def test_read_ok(alice, alice_ws, vlobs):
-    rep = await vlob_read(alice_ws, vlobs[0])
-    assert rep == authenticated_cmds.latest.vlob_read.RepOk(
-        blob=b"r:A b:1 v:2",
-        version=2,
-        author=alice.device_id,
-        timestamp=DateTime(2000, 1, 3),
-        author_last_role_granted_on=DateTime(2000, 1, 2),
-    )
-
-
-@pytest.mark.trio
-async def test_read_ok_v1(alice, alice_ws, vlobs):
-    rep = await vlob_read(alice_ws, vlobs[0], version=1)
-    assert rep == authenticated_cmds.latest.vlob_read.RepOk(
-        blob=b"r:A b:1 v:1",
-        version=1,
-        author=alice.device_id,
-        timestamp=DateTime(2000, 1, 2, 1),
-        author_last_role_granted_on=DateTime(2000, 1, 2),
-    )
-
-
-@pytest.mark.trio
-async def test_read_ok_timestamp_after_v2(alice, alice_ws, vlobs):
-    rep = await vlob_read(alice_ws, vlobs[0], timestamp=DateTime(2000, 1, 4))
-    assert rep == authenticated_cmds.latest.vlob_read.RepOk(
-        blob=b"r:A b:1 v:2",
-        version=2,
-        author=alice.device_id,
-        timestamp=DateTime(2000, 1, 3),
-        author_last_role_granted_on=DateTime(2000, 1, 2),
-    )
-
-
-@pytest.mark.trio
-async def test_read_ok_timestamp_is_v2(alice, alice_ws, vlobs):
-    rep = await vlob_read(alice_ws, vlobs[0], timestamp=DateTime(2000, 1, 3))
-    assert rep == authenticated_cmds.latest.vlob_read.RepOk(
-        blob=b"r:A b:1 v:2",
-        version=2,
-        author=alice.device_id,
-        timestamp=DateTime(2000, 1, 3),
-        author_last_role_granted_on=DateTime(2000, 1, 2),
-    )
-
-
-@pytest.mark.trio
-async def test_read_ok_timestamp_between_v1_and_v2(alice, alice_ws, vlobs):
-    rep = await vlob_read(alice_ws, vlobs[0], timestamp=DateTime(2000, 1, 2, 10))
-    assert rep == authenticated_cmds.latest.vlob_read.RepOk(
-        blob=b"r:A b:1 v:1",
-        version=1,
-        author=alice.device_id,
- timestamp=DateTime(2000, 1, 2, 1), - author_last_role_granted_on=DateTime(2000, 1, 2), - ) - - -@pytest.mark.trio -async def test_read_ok_timestamp_is_v1(alice, alice_ws, vlobs): - rep = await vlob_read(alice_ws, vlobs[0], timestamp=DateTime(2000, 1, 2, 1)) - assert rep == authenticated_cmds.latest.vlob_read.RepOk( - blob=b"r:A b:1 v:1", - version=1, - author=alice.device_id, - timestamp=DateTime(2000, 1, 2, 1), - author_last_role_granted_on=DateTime(2000, 1, 2), - ) - - -@pytest.mark.trio -async def test_read_before_v1(alice_ws, vlobs): - rep = await vlob_read(alice_ws, vlobs[0], timestamp=DateTime(2000, 1, 1)) - assert isinstance(rep, authenticated_cmds.latest.vlob_read.RepBadVersion) - - -@pytest.mark.trio -async def test_read_check_access_rights(backend, alice, bob, bob_ws, realm, vlobs, next_timestamp): - # Not part of the realm - rep = await vlob_read(bob_ws, vlobs[0]) - assert isinstance(rep, authenticated_cmds.latest.vlob_read.RepNotAllowed) - - for role in RealmRole.VALUES: - await backend.realm.update_roles( - alice.organization_id, - RealmGrantedRole( - certificate=b"dummy", - realm_id=realm, - user_id=bob.user_id, - role=role, - granted_by=alice.device_id, - granted_on=next_timestamp(), - ), - ) - rep = await vlob_read(bob_ws, vlobs[0]) - assert isinstance(rep, authenticated_cmds.latest.vlob_read.RepOk) - - # Ensure user that used to be part of the realm have no longer access - await backend.realm.update_roles( - alice.organization_id, - RealmGrantedRole( - certificate=b"", - realm_id=realm, - user_id=bob.user_id, - role=None, - granted_by=alice.device_id, - granted_on=next_timestamp(), - ), - ) - rep = await vlob_read(bob_ws, vlobs[0]) - assert isinstance(rep, authenticated_cmds.latest.vlob_read.RepNotAllowed) - - -@pytest.mark.trio -async def test_read_other_organization(backend_asgi_app, ws_from_other_organization_factory, vlobs): - async with ws_from_other_organization_factory(backend_asgi_app) as sock: - rep = await vlob_read(sock, vlobs[0]) - # The reason is no longer generated - assert isinstance(rep, authenticated_cmds.latest.vlob_read.RepNotFound) - - -@pytest.mark.parametrize( - "bad_msg", - [ - {"id": VLOB_ID.hex, "bad_field": "foo"}, - {"id": ""}, - {"id": VLOB_ID.hex}, # TODO: really bad ? - {"id": 42}, - {"id": None}, - {"id": VLOB_ID.hex, "version": 0}, - {"id": VLOB_ID.hex, "version": "foo"}, - {}, - ], -) -@pytest.mark.trio -async def test_read_bad_msg(alice_ws, bad_msg): - await alice_ws.send(packb({"cmd": "vlob_read", **bad_msg})) - raw_rep = await alice_ws.receive() - # Id and trust_seed are invalid anyway, but here we test another layer - # so it's not important as long as we get our `bad_message` status - rep = authenticated_cmds.latest.vlob_read.Rep.load(raw_rep) - assert isinstance(rep, authenticated_cmds.latest.vlob_read.RepUnknownStatus) - assert rep.status == "invalid_msg_format" - - -@pytest.mark.trio -async def test_read_bad_version(alice_ws, vlobs): - rep = await vlob_read(alice_ws, vlobs[0], version=3) - assert isinstance(rep, authenticated_cmds.latest.vlob_read.RepBadVersion) - - -@pytest.mark.trio -async def test_update_ok(alice_ws, vlobs): - await vlob_update(alice_ws, vlobs[0], version=3, blob=b"Next version.") - - -@pytest.mark.trio -async def test_update_bad_timestamp(alice_ws, vlobs): - blob = b"Initial commit." 
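The timestamp-based reads above all implement one lookup rule: serve the newest version created at or before the requested time, and answer `bad_version` when the timestamp predates version 1. A pure-Python model of that resolution (the names are illustrative, not the server code):

```python
from datetime import datetime
from typing import Optional

def version_at_timestamp(
    history: list[tuple[int, datetime]], at: datetime
) -> Optional[int]:
    # `history` is [(version, created_on), ...] in increasing version order;
    # the answer is the highest version created at or before `at`, and None
    # (i.e. a `bad_version` reply) when `at` predates version 1
    current = None
    for version, created_on in history:
        if created_on <= at:
            current = version
    return current

history = [(1, datetime(2000, 1, 2, 1)), (2, datetime(2000, 1, 3))]
assert version_at_timestamp(history, datetime(2000, 1, 4)) == 2      # after v2
assert version_at_timestamp(history, datetime(2000, 1, 3)) == 2      # exactly v2
assert version_at_timestamp(history, datetime(2000, 1, 2, 10)) == 1  # between v1 and v2
assert version_at_timestamp(history, datetime(2000, 1, 1)) is None   # before v1
```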
- d1 = DateTime(2000, 1, 1) - with freeze_time(d1): - d2 = d1.add(seconds=3600) - rep = await vlob_update( - alice_ws, vlobs[0], version=3, blob=blob, timestamp=d2, check_rep=False - ) - assert rep == VlobUpdateRepBadTimestamp( - reason=None, - backend_timestamp=d1, - ballpark_client_early_offset=BALLPARK_CLIENT_EARLY_OFFSET, - ballpark_client_late_offset=BALLPARK_CLIENT_LATE_OFFSET, - client_timestamp=d2, - ) - - -@pytest.mark.trio -async def test_update_not_found(alice_ws): - rep = await vlob_update(alice_ws, VLOB_ID, version=2, blob=b"Next version.", check_rep=False) - # The reason is no longer generated - assert isinstance(rep, VlobUpdateRepNotFound) - - -@pytest.mark.trio -async def test_update_check_access_rights( - backend, alice, bob, bob_ws, realm, vlobs, next_timestamp -): - # User not part of the realm - rep = await vlob_update( - bob_ws, - vlobs[0], - version=3, - blob=b"Next version.", - timestamp=next_timestamp(), - check_rep=False, - ) - assert isinstance(rep, VlobUpdateRepNotAllowed) - - # User part of the realm with various role - next_version = 3 - for role, access_granted in [ - (RealmRole.READER, False), - (RealmRole.CONTRIBUTOR, True), - (RealmRole.MANAGER, True), - (RealmRole.OWNER, True), - ]: - await backend.realm.update_roles( - alice.organization_id, - RealmGrantedRole( - certificate=b"dummy", - realm_id=realm, - user_id=bob.user_id, - role=role, - granted_by=alice.device_id, - granted_on=next_timestamp(), - ), - ) - rep = await vlob_update( - bob_ws, - vlobs[0], - version=next_version, - blob=b"Next version.", - timestamp=next_timestamp(), - check_rep=False, - ) - if access_granted: - assert isinstance(rep, VlobUpdateRepOk) - next_version += 1 - - else: - assert isinstance(rep, VlobUpdateRepNotAllowed) - - # Ensure user that used to be part of the realm have no longer access - await backend.realm.update_roles( - alice.organization_id, - RealmGrantedRole( - certificate=b"", - realm_id=realm, - user_id=bob.user_id, - role=None, - granted_by=alice.device_id, - granted_on=next_timestamp(), - ), - ) - rep = await vlob_update( - bob_ws, - vlobs[0], - version=next_version, - blob=b"Next version.", - timestamp=next_timestamp(), - check_rep=False, - ) - assert isinstance(rep, VlobUpdateRepNotAllowed) - - -@pytest.mark.trio -async def test_update_other_organization( - backend_asgi_app, ws_from_other_organization_factory, vlobs -): - async with ws_from_other_organization_factory(backend_asgi_app) as sock: - rep = await vlob_update(sock, vlobs[0], version=3, blob=b"Next version.", check_rep=False) - # The reason is no longer generated - assert isinstance(rep, VlobUpdateRepNotFound) - - -@pytest.mark.parametrize( - "bad_msg", - [ - {"id": VLOB_ID.hex, "version": 42, "blob": b"...", "bad_field": "foo"}, - {"id": VLOB_ID.hex, "version": 42, "blob": None}, - {"id": VLOB_ID.hex, "version": 42, "blob": 42}, - {"id": VLOB_ID.hex, "version": 42}, - {"id": VLOB_ID.hex, "version": None, "blob": b"..."}, - {"id": VLOB_ID.hex, "version": -1, "blob": b"..."}, - {"id": 42, "version": 42, "blob": b"..."}, - {"id": None, "version": 42, "blob": b"..."}, - {"version": 42, "blob": b"..."}, - {}, - ], -) -@pytest.mark.trio -async def test_update_bad_msg(alice_ws, bad_msg): - await alice_ws.send(packb({"cmd": "vlob_update", **bad_msg})) - raw_rep = await alice_ws.receive() - # Id and version are invalid anyway, but here we test another layer - # so it's not important as long as we get our `bad_message` status - rep = authenticated_cmds.latest.vlob_update.Rep.load(raw_rep) - assert 
isinstance(rep, authenticated_cmds.latest.vlob_update.RepUnknownStatus) - assert rep.status == "invalid_msg_format" - - -@pytest.mark.trio -async def test_update_bad_version(alice_ws, vlobs): - rep = await vlob_update(alice_ws, vlobs[0], version=4, blob=b"Next version.", check_rep=False) - assert isinstance(rep, VlobUpdateRepBadVersion) - - -@pytest.mark.trio -async def test_bad_encryption_revision(alice_ws, realm, vlobs): - rep = await vlob_create( - alice_ws, realm, VLOB_ID, blob=b"First version.", encryption_revision=42, check_rep=False - ) - assert isinstance(rep, VlobCreateRepBadEncryptionRevision) - - rep = await vlob_read(alice_ws, vlobs[0], encryption_revision=42) - assert isinstance(rep, authenticated_cmds.latest.vlob_read.RepBadEncryptionRevision) - - rep = await vlob_update( - alice_ws, - vlobs[0], - version=3, - blob=b"Next version.", - encryption_revision=42, - check_rep=False, - ) - assert isinstance(rep, VlobUpdateRepBadEncryptionRevision) - - -@pytest.mark.trio -async def test_list_versions_ok(alice, alice_ws, vlobs): - rep = await vlob_list_versions(alice_ws, vlobs[0]) - assert rep == VlobListVersionsRepOk( - { - 1: (DateTime(2000, 1, 2, 1, 0, 0), alice.device_id), - 2: (DateTime(2000, 1, 3, 0, 0, 0), alice.device_id), - }, - ) - - -@pytest.mark.trio -async def test_list_versions_not_found(alice_ws): - rep = await vlob_list_versions(alice_ws, VLOB_ID) - # The reason is no longer generated - assert isinstance(rep, VlobListVersionsRepNotFound) - - -@pytest.mark.trio -async def test_list_versions_check_access_rights( - backend, alice, bob, bob_ws, realm, vlobs, next_timestamp -): - # Not part of the realm - rep = await vlob_list_versions(bob_ws, vlobs[0]) - assert isinstance(rep, VlobListVersionsRepNotAllowed) - - for role in RealmRole.VALUES: - await backend.realm.update_roles( - alice.organization_id, - RealmGrantedRole( - certificate=b"dummy", - realm_id=realm, - user_id=bob.user_id, - role=role, - granted_by=alice.device_id, - granted_on=next_timestamp(), - ), - ) - rep = await vlob_list_versions(bob_ws, vlobs[0]) - assert isinstance(rep, VlobListVersionsRepOk) - - # Ensure user that used to be part of the realm have no longer access - await backend.realm.update_roles( - alice.organization_id, - RealmGrantedRole( - certificate=b"", - realm_id=realm, - user_id=bob.user_id, - role=None, - granted_by=alice.device_id, - granted_on=next_timestamp(), - ), - ) - rep = await vlob_list_versions(bob_ws, vlobs[0]) - assert isinstance(rep, VlobListVersionsRepNotAllowed) - - -@pytest.mark.trio -async def test_list_versions_other_organization( - backend_asgi_app, ws_from_other_organization_factory, vlobs -): - async with ws_from_other_organization_factory(backend_asgi_app) as sock: - rep = await vlob_list_versions(sock, vlobs[0]) - # The reason is no longer generated - assert isinstance(rep, VlobListVersionsRepNotFound) - - -@pytest.mark.parametrize( - "bad_msg", - [ - {"id": VLOB_ID.hex, "bad_field": "foo"}, - {"id": ""}, - {"id": VLOB_ID.hex}, # TODO: really bad ? 
- {"id": 42}, - {"id": None}, - {"id": VLOB_ID.hex, "version": 1}, - {}, - ], -) -@pytest.mark.trio -async def test_list_versions_bad_msg(alice_ws, bad_msg): - await alice_ws.send(packb({"cmd": "vlob_list_versions", **bad_msg})) - raw_rep = await alice_ws.receive() - # Id and trust_seed are invalid anyway, but here we test another layer - # so it's not important as long as we get our `bad_message` status - rep = authenticated_cmds.latest.vlob_list_versions.Rep.load(raw_rep) - assert isinstance(rep, authenticated_cmds.latest.vlob_list_versions.RepUnknownStatus) - assert rep.status == "invalid_msg_format" - - -@pytest.mark.trio -async def test_access_during_maintenance(backend, alice, alice_ws, realm, vlobs): - await backend.realm.start_reencryption_maintenance( - alice.organization_id, - alice.device_id, - realm, - 2, - {alice.user_id: b"whatever"}, - DateTime(2000, 1, 2), - ) - - rep = await vlob_create( - alice_ws, realm, VLOB_ID, blob=b"First version.", encryption_revision=2, check_rep=False - ) - assert isinstance(rep, VlobCreateRepInMaintenance) - - rep = await vlob_read(alice_ws, vlobs[0], encryption_revision=2) - assert isinstance(rep, authenticated_cmds.latest.vlob_read.RepInMaintenance) - - rep = await vlob_update( - alice_ws, vlobs[0], version=3, blob=b"Next version.", encryption_revision=2, check_rep=False - ) - assert isinstance(rep, VlobUpdateRepInMaintenance) - - -@pytest.mark.trio -async def test_vlob_updates_causality_checks( - backend, - alice, - bob, - adam, - alice_ws, - bob_ws, - realm, - realm_generate_certif_and_update_roles_or_fail, - next_timestamp, -): - # Use this timestamp as reference - ref = next_timestamp() - - # Grant a role to bob - rep = await realm_generate_certif_and_update_roles_or_fail( - alice_ws, alice, realm, bob.user_id, RealmRole.MANAGER, ref - ) - assert isinstance(rep, RealmUpdateRolesRepOk) - - # Now bob writes to the corresponding realm with the same timestamp or lower: this should fail - for timestamp in (ref, ref.subtract(seconds=1)): - rep = await vlob_create( - bob_ws, realm, VLOB_ID, blob=b"ciphered", timestamp=timestamp, check_rep=False - ) - assert rep == VlobCreateRepRequireGreaterTimestamp(ref) - - # Advance ref - ref = ref.add(seconds=10) - - # Bob successfully write version 1 - rep = await vlob_create( - bob_ws, realm, VLOB_ID, blob=b"ciphered", timestamp=ref, check_rep=False - ) - assert isinstance(rep, VlobCreateRepOk) - - # Now bob writes to the corresponding vlob with a lower timestamp: this should fail - rep = await vlob_update( - bob_ws, - VLOB_ID, - version=2, - blob=b"ciphered", - timestamp=ref.subtract(seconds=1), - check_rep=False, - ) - assert rep == VlobUpdateRepRequireGreaterTimestamp(ref) diff --git a/server/tests/backend/realm/test_vlob_poll_changes.py b/server/tests/backend/realm/test_vlob_poll_changes.py deleted file mode 100644 index 317df53f7f3..00000000000 --- a/server/tests/backend/realm/test_vlob_poll_changes.py +++ /dev/null @@ -1,181 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -import pytest - -from parsec._parsec import ( - DateTime, -) -from parsec.api.data import RealmRoleCertificate -from parsec.api.protocol import ( - RealmRole, - RealmUpdateRolesRepOk, - VlobID, - VlobPollChangesRepNotAllowed, - VlobPollChangesRepNotFound, - VlobPollChangesRepOk, - VlobUpdateRepNotAllowed, - VlobUpdateRepOk, -) -from tests.backend.common import realm_update_roles, vlob_poll_changes, vlob_update - -NOW = DateTime(2000, 1, 3) -VLOB_ID = 
VlobID.from_hex("00000000000000000000000000000001") -OTHER_VLOB_ID = VlobID.from_hex("00000000000000000000000000000002") -YET_ANOTHER_VLOB_ID = VlobID.from_hex("00000000000000000000000000000003") -UNKNOWN_REALM_ID = VlobID.from_hex("0000000000000000000000000000000F") - - -@pytest.fixture -def realm_generate_certif_and_update_roles_or_fail(next_timestamp): - async def _realm_generate_certif_and_update_roles_or_fail(ws, author, realm_id, user_id, role): - certif = RealmRoleCertificate( - author=author.device_id, - timestamp=next_timestamp(), - realm_id=realm_id, - user_id=user_id, - role=role, - ).dump_and_sign(author.signing_key) - return await realm_update_roles(ws, certif, check_rep=False) - - return _realm_generate_certif_and_update_roles_or_fail - - -@pytest.mark.trio -async def test_realm_updated_by_vlob(backend, alice, alice_ws, realm): - await backend.vlob.create( - organization_id=alice.organization_id, - author=alice.device_id, - realm_id=realm, - encryption_revision=1, - vlob_id=VLOB_ID, - timestamp=NOW, - blob=b"v1", - ) - await backend.vlob.update( - organization_id=alice.organization_id, - author=alice.device_id, - encryption_revision=1, - vlob_id=VLOB_ID, - version=2, - timestamp=NOW, - blob=b"v2", - ) - - for last_checkpoint in (0, 1): - rep = await vlob_poll_changes(alice_ws, realm, last_checkpoint) - assert rep == VlobPollChangesRepOk({VLOB_ID: 2}, 2) - - -@pytest.mark.trio -async def test_vlob_poll_changes_checkpoint_up_to_date(backend, alice, alice_ws, realm): - await backend.vlob.create( - organization_id=alice.organization_id, - author=alice.device_id, - realm_id=realm, - encryption_revision=1, - vlob_id=VLOB_ID, - timestamp=NOW, - blob=b"v1", - ) - await backend.vlob.update( - organization_id=alice.organization_id, - author=alice.device_id, - encryption_revision=1, - vlob_id=VLOB_ID, - version=2, - timestamp=NOW, - blob=b"v2", - ) - - rep = await vlob_poll_changes(alice_ws, realm, 2) - assert rep == VlobPollChangesRepOk({}, 2) - - -@pytest.mark.trio -async def test_vlob_poll_changes_not_found(alice_ws): - rep = await vlob_poll_changes(alice_ws, UNKNOWN_REALM_ID, 0) - # The reason is no longer generated - assert isinstance(rep, VlobPollChangesRepNotFound) - - -@pytest.mark.trio -async def test_vlob_poll_changes( - backend, - alice, - bob, - alice_ws, - bob_ws, - realm, - next_timestamp, - realm_generate_certif_and_update_roles_or_fail, -): - await backend.vlob.create( - organization_id=alice.organization_id, - author=alice.device_id, - realm_id=realm, - encryption_revision=1, - vlob_id=VLOB_ID, - timestamp=NOW, - blob=b"v1", - ) - - # At first only Alice is allowed - - rep = await vlob_poll_changes(bob_ws, realm, 2) - assert isinstance(rep, VlobPollChangesRepNotAllowed) - - # Add Bob with read&write rights - - rep = await realm_generate_certif_and_update_roles_or_fail( - alice_ws, alice, realm, bob.user_id, RealmRole.CONTRIBUTOR - ) - assert isinstance(rep, RealmUpdateRolesRepOk) - - rep = await vlob_update(bob_ws, VLOB_ID, 2, b"v2", next_timestamp()) - assert isinstance(rep, VlobUpdateRepOk) - - rep = await vlob_poll_changes(bob_ws, realm, 1) - assert rep == VlobPollChangesRepOk({VLOB_ID: 2}, 2) - - # Change Bob with read only right - - rep = await realm_generate_certif_and_update_roles_or_fail( - alice_ws, alice, realm, bob.user_id, RealmRole.READER - ) - assert isinstance(rep, RealmUpdateRolesRepOk) - - rep = await vlob_update(bob_ws, VLOB_ID, 3, b"v3", next_timestamp(), check_rep=False) - assert isinstance(rep, VlobUpdateRepNotAllowed) - - rep = await 
vlob_poll_changes(bob_ws, realm, 1) - assert rep == VlobPollChangesRepOk({VLOB_ID: 2}, 2) - - # Finally remove all rights from Bob - - rep = await realm_generate_certif_and_update_roles_or_fail( - alice_ws, alice, realm, bob.user_id, None - ) - assert isinstance(rep, RealmUpdateRolesRepOk) - - rep = await vlob_poll_changes(bob_ws, realm, 2) - assert isinstance(rep, VlobPollChangesRepNotAllowed) - - rep = await vlob_update(bob_ws, VLOB_ID, 3, b"v3", next_timestamp(), check_rep=False) - assert isinstance(rep, VlobUpdateRepNotAllowed) - - -@pytest.mark.trio -async def test_vlob_poll_changes_during_maintenance(backend, alice, alice_ws, realm): - await backend.realm.start_reencryption_maintenance( - alice.organization_id, - alice.device_id, - realm, - 2, - {alice.user_id: b"whatever"}, - DateTime(2000, 1, 2), - ) - - # It's ok to poll changes while the workspace is being reencrypted - rep = await vlob_poll_changes(alice_ws, realm, 1) - assert isinstance(rep, VlobPollChangesRepOk) diff --git a/server/tests/backend/realm/test_vlobs_updated_event.py b/server/tests/backend/realm/test_vlobs_updated_event.py deleted file mode 100644 index 658538c288d..00000000000 --- a/server/tests/backend/realm/test_vlobs_updated_event.py +++ /dev/null @@ -1,289 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -import pytest - -from parsec._parsec import ( - BackendEventRealmRolesUpdated, - BackendEventRealmVlobsUpdated, - DateTime, - authenticated_cmds, -) -from parsec.api.protocol import ( - RealmRole, - VlobID, -) -from parsec.backend.realm import RealmGrantedRole -from tests.backend.common import events_listen - -NOW = DateTime(2000, 1, 3) -VLOB_ID = VlobID.from_hex("00000000000000000000000000000001") -OTHER_VLOB_ID = VlobID.from_hex("00000000000000000000000000000002") -YET_ANOTHER_VLOB_ID = VlobID.from_hex("00000000000000000000000000000003") -REALM_ID = VlobID.from_hex("0000000000000000000000000000000A") - - -@pytest.mark.trio -async def test_vlobs_updated_event_ok(backend, alice_ws, alice, alice2, realm, other_realm): - # Not listened events - with backend.event_bus.listen() as spy: - await backend.vlob.create( - organization_id=alice.organization_id, - author=alice.device_id, - realm_id=realm, - encryption_revision=1, - vlob_id=VLOB_ID, - timestamp=NOW, - blob=b"v1", - ) - await spy.wait_with_timeout(BackendEventRealmVlobsUpdated) - - # Start listening events - async with events_listen(alice_ws) as alice_events_listener: - # Good events - with backend.event_bus.listen() as spy: - await backend.vlob.create( - organization_id=alice.organization_id, - author=alice2.device_id, - realm_id=other_realm, - encryption_revision=1, - vlob_id=OTHER_VLOB_ID, - timestamp=NOW, - blob=b"v1", - ) - await backend.vlob.update( - organization_id=alice.organization_id, - author=alice2.device_id, - encryption_revision=1, - vlob_id=VLOB_ID, - version=2, - timestamp=NOW, - blob=b"v2", - ) - await backend.vlob.update( - organization_id=alice.organization_id, - author=alice2.device_id, - encryption_revision=1, - vlob_id=VLOB_ID, - version=3, - timestamp=NOW, - blob=b"v3", - ) - - # No guarantees those events occur before the commands' return - # On top of that, other `realm.vlobs_updated` has been triggered - # before us (i.g. during alice user vlob creation). In case of slow - # database those events could pop only now, hence shadowing the ones - # we are waiting for. To avoid this we have to specify event params. 
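That comment is the key subtlety of this file: waiting on an event *type* can match a stale event that was emitted earlier (e.g. during user vlob creation), so the test waits on fully parameterized event instances instead. A toy model of the difference, not the real spy fixture:

```python
from collections import deque
from dataclasses import dataclass

@dataclass(frozen=True)
class RealmVlobsUpdated:
    realm_id: str
    checkpoint: int
    src_id: str
    src_version: int

class Spy:
    def __init__(self) -> None:
        self.events: deque = deque()

    def wait(self, expected):
        # `expected` is either an event class (match any instance of the
        # type) or a frozen instance (match the exact parameters)
        while self.events:
            candidate = self.events.popleft()
            if isinstance(expected, type) and isinstance(candidate, expected):
                return candidate
            if candidate == expected:
                return candidate
        raise TimeoutError(f"no event matching {expected!r}")

spy = Spy()
spy.events.append(RealmVlobsUpdated("realm", 1, "vlob", 1))  # stale earlier event
spy.events.append(RealmVlobsUpdated("realm", 2, "vlob", 2))  # the one we care about

# Matching by type latches onto the stale checkpoint=1 event...
assert spy.wait(RealmVlobsUpdated).checkpoint == 1
# ...while matching by a fully parameterized instance skips straight to ours
assert spy.wait(RealmVlobsUpdated("realm", 2, "vlob", 2)).checkpoint == 2
```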
- await spy.wait_multiple_with_timeout( - [ - BackendEventRealmVlobsUpdated( - organization_id=alice.organization_id, - author=alice2.device_id, - realm_id=other_realm, - checkpoint=1, - src_id=OTHER_VLOB_ID, - src_version=1, - ), - BackendEventRealmVlobsUpdated( - organization_id=alice.organization_id, - author=alice2.device_id, - realm_id=realm, - checkpoint=2, - src_id=VLOB_ID, - src_version=2, - ), - BackendEventRealmVlobsUpdated( - organization_id=alice.organization_id, - author=alice2.device_id, - realm_id=realm, - checkpoint=3, - src_id=VLOB_ID, - src_version=3, - ), - ] - ) - - reps = [ - await alice_events_listener.do_recv(), - await alice_events_listener.do_recv(), - await alice_events_listener.do_recv(), - # TODO: how to test there is no more events waiting to be received ? - ] - - assert reps == [ - authenticated_cmds.latest.events_listen.RepOk( - authenticated_cmds.latest.events_listen.APIEventRealmVlobsUpdated( - other_realm, 1, OTHER_VLOB_ID, 1 - ) - ), - authenticated_cmds.latest.events_listen.RepOk( - authenticated_cmds.latest.events_listen.APIEventRealmVlobsUpdated(realm, 2, VLOB_ID, 2) - ), - authenticated_cmds.latest.events_listen.RepOk( - authenticated_cmds.latest.events_listen.APIEventRealmVlobsUpdated(realm, 3, VLOB_ID, 3) - ), - ] - - -@pytest.mark.trio -async def test_vlobs_updated_event_handle_self_events(backend, alice_ws, alice, realm): - async with events_listen(alice_ws) as alice_events_listener: - with backend.event_bus.listen() as spy: - await backend.vlob.create( - organization_id=alice.organization_id, - author=alice.device_id, - realm_id=realm, - encryption_revision=1, - vlob_id=VLOB_ID, - timestamp=NOW, - blob=b"v1", - ) - - await backend.vlob.create( - organization_id=alice.organization_id, - author=alice.device_id, - realm_id=realm, - encryption_revision=1, - vlob_id=OTHER_VLOB_ID, - timestamp=NOW, - blob=b"v1", - ) - await backend.vlob.update( - organization_id=alice.organization_id, - author=alice.device_id, - encryption_revision=1, - vlob_id=VLOB_ID, - version=2, - timestamp=NOW, - blob=b"v2", - ) - - # Wait for events to be processed by the backend - await spy.wait_multiple_with_timeout( - [ - BackendEventRealmVlobsUpdated, - BackendEventRealmVlobsUpdated, - BackendEventRealmVlobsUpdated, - ] - ) - - # Self-events should have been ignored - # TODO: how to test there is no more events waiting to be received ? - rep = await apiv2v3_events_listen_nowait(alice_ws) - assert isinstance(rep, ApiV2V3_EventsListenRepNoEvents) - - -@pytest.mark.trio -async def test_vlobs_updated_event_not_participant(backend, alice_ws, bob, bob_realm): - async with events_listen(alice_ws) as alice_events_listener: - with backend.event_bus.listen() as spy: - await backend.vlob.create( - organization_id=bob.organization_id, - author=bob.device_id, - realm_id=bob_realm, - encryption_revision=1, - vlob_id=VLOB_ID, - timestamp=NOW, - blob=b"v1", - ) - await backend.vlob.update( - organization_id=bob.organization_id, - author=bob.device_id, - encryption_revision=1, - vlob_id=VLOB_ID, - version=2, - timestamp=NOW, - blob=b"v2", - ) - - # Wait for events to be processed by the backend - await spy.wait_multiple_with_timeout( - [BackendEventRealmVlobsUpdated, BackendEventRealmVlobsUpdated] - ) - - # TODO: how to test there is no more events waiting to be received ? 
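The closest the suite gets to answering that TODO is the legacy non-blocking poll used just below: receive what is pending, and report emptiness instead of blocking. Its contract is easy to model (a sketch with stdlib types, not the real helper):

```python
import queue

def events_listen_nowait(q: "queue.Queue[object]"):
    # Non-blocking poll: return the next pending event, or a no-events
    # marker (None here) when the listener's queue is empty
    try:
        return q.get_nowait()
    except queue.Empty:
        return None

q: "queue.Queue[object]" = queue.Queue()
assert events_listen_nowait(q) is None  # nothing pending: the test's expectation
q.put("realm.vlobs_updated")
assert events_listen_nowait(q) == "realm.vlobs_updated"
```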
-        rep = await apiv2v3_events_listen_nowait(alice_ws)
-        assert isinstance(rep, ApiV2V3_EventsListenRepNoEvents)
-
-
-@pytest.mark.trio
-@pytest.mark.parametrize("realm_created_by_self", (True, False))
-async def test_vlobs_updated_event_realm_created_after_subscribe(
-    backend, alice_ws, alice, alice2, realm_created_by_self
-):
-    realm_id = VlobID.from_hex("0000000000000000000000000000000A")
-    async with events_listen(alice_ws) as alice_events_listener:
-        # New realm, should get events anyway
-        with backend.event_bus.listen() as spy:
-            realm_creator = alice if realm_created_by_self else alice2
-            # Create the realm
-            await backend.realm.create(
-                organization_id=realm_creator.organization_id,
-                self_granted_role=RealmGrantedRole(
-                    realm_id=realm_id,
-                    user_id=realm_creator.user_id,
-                    certificate=b"",
-                    role=RealmRole.OWNER,
-                    granted_by=realm_creator.device_id,
-                    granted_on=DateTime(2000, 1, 2),
-                ),
-            )
-            # Create vlob in realm
-            await backend.vlob.create(
-                organization_id=realm_creator.organization_id,
-                author=realm_creator.device_id,
-                realm_id=realm_id,
-                encryption_revision=1,
-                vlob_id=VLOB_ID,
-                timestamp=NOW,
-                blob=b"v1",
-            )
-            # Update vlob in realm
-            await backend.vlob.update(
-                organization_id=alice2.organization_id,
-                author=alice2.device_id,
-                encryption_revision=1,
-                vlob_id=VLOB_ID,
-                version=2,
-                timestamp=NOW,
-                blob=b"v2",
-            )
-
-            # Wait for events to be processed by the backend
-            await spy.wait_multiple_with_timeout(
-                [
-                    BackendEventRealmRolesUpdated,
-                    BackendEventRealmVlobsUpdated,
-                    BackendEventRealmVlobsUpdated,
-                ]
-            )
-
-        # Realm access granted
-        rep = await alice_events_listener.do_recv()
-        assert rep == authenticated_cmds.latest.events_listen.RepOk(
-            authenticated_cmds.latest.events_listen.APIEventRealmRolesUpdated(
-                realm_id, RealmRole.OWNER
-            )
-        )
-
-        # Create vlob in realm event
-        if not realm_created_by_self:
-            rep = await alice_events_listener.do_recv()
-            assert rep == authenticated_cmds.latest.events_listen.RepOk(
-                authenticated_cmds.latest.events_listen.APIEventRealmVlobsUpdated(
-                    realm_id, 1, VLOB_ID, 1
-                )
-            )
-
-        # Update vlob in realm event
-        rep = await alice_events_listener.do_recv()
-        assert rep == authenticated_cmds.latest.events_listen.RepOk(
-            authenticated_cmds.latest.events_listen.APIEventRealmVlobsUpdated(
-                realm_id, 2, VLOB_ID, 2
-            )
-        )
-
-        # TODO: how to test there is no more events waiting to be received ?
- rep = await apiv2v3_events_listen_nowait(alice_ws) - assert isinstance(rep, ApiV2V3_EventsListenRepNoEvents) diff --git a/server/tests/backend/sequester/test_bootstrap_organization.py b/server/tests/backend/sequester/test_bootstrap_organization.py deleted file mode 100644 index 4a83529bd6e..00000000000 --- a/server/tests/backend/sequester/test_bootstrap_organization.py +++ /dev/null @@ -1,109 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -import pytest - -from parsec._parsec import DateTime -from parsec.api.protocol import ( - OrganizationBootstrapRepBadTimestamp, - OrganizationBootstrapRepInvalidData, - OrganizationBootstrapRepOk, -) -from tests.backend.common import organization_bootstrap -from tests.common import ( - LocalDevice, - OrganizationFullData, - customize_fixtures, - local_device_to_backend_user, - sequester_authority_factory, -) - - -# Sequester service modification is exposed as server API, so we only test the internals -@pytest.mark.trio -@customize_fixtures(backend_not_populated=True) -@customize_fixtures(coolorg_is_sequestered_organization=True) -async def test_sequestered_organization_bootstrap( - coolorg: OrganizationFullData, - other_org: OrganizationFullData, - alice: LocalDevice, - anonymous_backend_ws, - backend, -): - # Create organization - org_token = "123456" - await backend.organization.create(id=coolorg.organization_id, bootstrap_token=org_token) - - backend_alice, backend_alice_first_device = local_device_to_backend_user( - alice, coolorg, timestamp=coolorg.sequester_authority.certif_data.timestamp - ) - - organization_bootstrap_args = { - "bootstrap_token": org_token, - "root_verify_key": coolorg.root_verify_key, - "user_certificate": backend_alice.user_certificate, - "device_certificate": backend_alice_first_device.device_certificate, - "redacted_user_certificate": backend_alice.redacted_user_certificate, - "redacted_device_certificate": backend_alice_first_device.redacted_device_certificate, - "sequester_authority_certificate": coolorg.sequester_authority.certif, - } - - # Bad authority certificate - rep = await organization_bootstrap( - anonymous_backend_ws, - check_rep=False, - **{**organization_bootstrap_args, "sequester_authority_certificate": b"dummy"}, - ) - assert isinstance(rep, OrganizationBootstrapRepInvalidData) - - # Authority certificate not signed by the root key - bad_sequester_authority_certificate = coolorg.sequester_authority.certif_data.dump_and_sign( - other_org.root_signing_key - ) - rep = await organization_bootstrap( - anonymous_backend_ws, - check_rep=False, - **{ - **organization_bootstrap_args, - "sequester_authority_certificate": bad_sequester_authority_certificate, - }, - ) - assert isinstance(rep, OrganizationBootstrapRepInvalidData) - - # Timestamp out of ballpark in authority certificate - timestamp_out_of_ballpark = DateTime(2000, 1, 1) - authority_certif_bad_timestamp = sequester_authority_factory( - coolorg.root_signing_key, timestamp=timestamp_out_of_ballpark - ).certif - rep = await organization_bootstrap( - anonymous_backend_ws, - check_rep=False, - **{ - **organization_bootstrap_args, - "sequester_authority_certificate": authority_certif_bad_timestamp, - }, - ) - assert isinstance(rep, OrganizationBootstrapRepBadTimestamp) - assert rep.ballpark_client_early_offset == 300.0 - assert rep.ballpark_client_late_offset == 320.0 - - # Timestamp in authority certificate different than user/device certificates - different_timestamp = 
DateTime.now() - authority_certif_different_timestamp = sequester_authority_factory( - coolorg.root_signing_key, timestamp=different_timestamp - ).certif - rep = await organization_bootstrap( - anonymous_backend_ws, - check_rep=False, - **{ - **organization_bootstrap_args, - "sequester_authority_certificate": authority_certif_different_timestamp, - }, - ) - assert isinstance(rep, OrganizationBootstrapRepInvalidData) - - # Finally valid bootstrap - rep = await organization_bootstrap( - anonymous_backend_ws, check_rep=False, **organization_bootstrap_args - ) - assert isinstance(rep, OrganizationBootstrapRepOk) diff --git a/server/tests/backend/sequester/test_export.py b/server/tests/backend/sequester/test_export.py deleted file mode 100644 index 936a767e81b..00000000000 --- a/server/tests/backend/sequester/test_export.py +++ /dev/null @@ -1,838 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -import sqlite3 - -import pytest - -from parsec._parsec import DateTime, HashDigest, SecretKey, SequesterPrivateKeyDer, VlobID -from parsec.api.data import ( - BlockAccess, - DeviceCertificate, - EntryName, - FileManifest, - FolderManifest, - RealmRoleCertificate, - RevokedUserCertificate, - UserCertificate, - WorkspaceManifest, -) -from parsec.api.protocol import ( - BlockID, - OrganizationID, - RealmRole, - SequesterServiceID, - UserProfile, -) -from parsec.backend.postgresql.sequester_export import ( - OUTPUT_DB_INIT_QUERY, - RealmExporter, - RealmExporterInputError, - RealmExporterOutputDbError, -) -from parsec.backend.realm import RealmGrantedRole -from parsec.sequester_export_reader import extract_workspace -from tests.common import OrganizationFullData, customize_fixtures, sequester_service_factory - - -@customize_fixtures(coolorg_is_sequestered_organization=True, adam_is_revoked=True) -@pytest.mark.postgresql -@pytest.mark.trio -async def test_sequester_export_full_run( - tmp_path, coolorg: OrganizationFullData, backend, alice, alice2, bob, adam, other_org -): - curr_now = DateTime(2000, 1, 1) - - def _next_day() -> DateTime: - nonlocal curr_now - curr_now = curr_now.add(days=1) - return curr_now - - def _sqlite_timestamp(year: int, month: int, day: int) -> int: - return int(DateTime(year, month, day).timestamp() * 1000000) - - output_db_path = tmp_path / "export.sqlite" - - # Create the sequester service - s1 = sequester_service_factory( - authority=coolorg.sequester_authority, - label="Sequester service 1", - timestamp=curr_now, # 2000/1/1 - ) - await backend.sequester.create_service( - organization_id=coolorg.organization_id, service=s1.backend_service - ) - - # Populate: Realm - realm1 = VlobID.new() - await backend.realm.create( - organization_id=coolorg.organization_id, - self_granted_role=RealmGrantedRole( - certificate=b"role_cert1", - realm_id=realm1, - user_id=alice.user_id, - role=RealmRole.OWNER, - granted_by=alice.device_id, - granted_on=_next_day(), # 2000/1/2 - ), - ) - - await backend.realm.update_roles( - organization_id=coolorg.organization_id, - new_role=RealmGrantedRole( - certificate=b"role_cert2", - realm_id=realm1, - user_id=bob.user_id, - role=RealmRole.MANAGER, - granted_by=alice.device_id, - granted_on=_next_day(), # 2000/1/3 - ), - ) - - # Populate: Vlobs - vlob1 = VlobID.new() - vlob2 = VlobID.new() - await backend.vlob.create( - organization_id=coolorg.organization_id, - author=alice.device_id, - realm_id=realm1, - encryption_revision=1, - vlob_id=vlob1, - timestamp=_next_day(), # 
2000/1/4 - blob=b"vlob1v1", - # Note sequester blob can have a different size than the regular blob ! - sequester_blob={s1.service_id: b"s1:vlob1v1"}, - ) - await backend.vlob.update( - organization_id=coolorg.organization_id, - author=alice.device_id, - encryption_revision=1, - vlob_id=vlob1, - version=2, - timestamp=_next_day(), # 2000/1/5 - blob=b"vlob1v2", - sequester_blob={s1.service_id: b"s1:vlob1v2"}, - ) - await backend.vlob.create( - organization_id=coolorg.organization_id, - author=alice.device_id, - realm_id=realm1, - encryption_revision=1, - vlob_id=vlob2, - timestamp=_next_day(), # 2000/1/6 - blob=b"vlob2v1", - sequester_blob={s1.service_id: b"s1:vlob2v1"}, - ) - - # Populate: blocks - block1 = BlockID.new() - block2 = BlockID.new() - await backend.block.create( - organization_id=coolorg.organization_id, - author=alice.device_id, - block_id=block1, - realm_id=realm1, - created_on=_next_day(), # 2000/1/7 - block=b"block1", - ) - await backend.block.create( - organization_id=coolorg.organization_id, - author=alice.device_id, - block_id=block2, - realm_id=realm1, - created_on=_next_day(), # 2000/1/8 - block=b"block2", - ) - - # Now we can do the actual export ! - async with RealmExporter.run( - organization_id=coolorg.organization_id, - realm_id=realm1, - service_id=s1.service_id, - output_db_path=output_db_path, - input_dbh=backend.sequester.dbh, - input_blockstore=backend.blockstore, - ) as exporter: - # Export vlobs - to_export_count, vlob_batch_offset_marker0 = await exporter.compute_vlobs_export_status() - assert to_export_count == 3 - assert vlob_batch_offset_marker0 == 0 - - vlob_batch_offset_marker1 = await exporter.export_vlobs(batch_size=1) - assert vlob_batch_offset_marker1 == 1 - vlob_batch_offset_marker2 = await exporter.export_vlobs( - batch_offset_marker=vlob_batch_offset_marker1 - ) - assert vlob_batch_offset_marker2 == 3 - - # Export blocks - to_export_count, block_batch_offset_marker0 = await exporter.compute_blocks_export_status() - assert to_export_count == 2 - assert block_batch_offset_marker0 == 0 - block_batch_offset_marker1 = await exporter.export_blocks(batch_size=1) - assert block_batch_offset_marker1 == 1 - block_batch_offset_marker2 = await exporter.export_blocks( - batch_offset_marker=block_batch_offset_marker1 - ) - assert block_batch_offset_marker2 == 2 - - # Export done, check idempotency for vlobs... 
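Idempotency falls out of the offset-marker design: exported rows are keyed by a monotonically increasing `_id`, a batch only copies rows above the marker, and re-inserting already-copied rows is harmless. A self-contained model of that contract (the schema is a toy stand-in, not the real `OUTPUT_DB_INIT_QUERY`):

```python
import sqlite3

def export_batch(src_rows, out: sqlite3.Connection,
                 batch_offset_marker: int, batch_size: int = 1000) -> int:
    # Copy at most `batch_size` rows whose _id is above the marker and
    # return the new marker; INSERT OR IGNORE makes re-runs no-ops
    batch = [(i, data) for i, data in src_rows if i > batch_offset_marker][:batch_size]
    out.executemany("INSERT OR IGNORE INTO vlob_atom(_id, blob) VALUES (?, ?)", batch)
    out.commit()
    return batch[-1][0] if batch else batch_offset_marker

out = sqlite3.connect(":memory:")
out.execute("CREATE TABLE vlob_atom (_id INTEGER PRIMARY KEY, blob BLOB)")
rows = [(1, b"v1"), (2, b"v2"), (3, b"v3")]
marker = export_batch(rows, out, batch_offset_marker=0, batch_size=1)
assert marker == 1
marker = export_batch(rows, out, batch_offset_marker=marker)
assert marker == 3
# Re-running with a stale marker duplicates nothing
export_batch(rows, out, batch_offset_marker=1)
assert out.execute("SELECT count(*) FROM vlob_atom").fetchone()[0] == 3
```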
- vlob_batch_offset_marker3 = await exporter.export_vlobs( - batch_offset_marker=vlob_batch_offset_marker1 - ) - assert vlob_batch_offset_marker3 == 3 - # ...and blocks - block_batch_offset_marker3 = await exporter.export_blocks( - batch_offset_marker=block_batch_offset_marker1 - ) - assert block_batch_offset_marker3 == 2 - - # Check exported database - - con = sqlite3.connect(f"file:{output_db_path}?mode=ro", uri=True) - - # 1) info table - row = con.execute("SELECT magic, version, realm_id from info").fetchone() - assert row == (87947, 1, realm1.bytes) - - # 2) realm_role table - rows = con.execute("SELECT _id, role_certificate from realm_role").fetchall() - # SQLite does dynamic typing, so better be careful - for row in rows: - assert isinstance(row[0], int) # _id - assert isinstance(row[1], bytes) # role_certificate - assert len(rows) == 2 # Contains alice's OWNER role and bob MANAGER roles on realm1 - assert {row[1] for row in rows} == {b"role_cert1", b"role_cert2"} - assert len({row[0] for row in rows}) == 2 # Make sure all ids are unique - - # 3) user table - rows = con.execute( - "SELECT _id, user_certificate, revoked_user_certificate FROM user_" - ).fetchall() - # SQLite does dynamic typing, so better be careful - for row in rows: - assert isinstance(row[0], int) # _id - assert isinstance(row[1], bytes) # user_certificate - assert row[2] is None or isinstance(row[2], bytes) # revoked_user_certificate - assert len(rows) == 3 # Contains alice, bob and adam - user_ids = set() - for device in (alice, bob, adam): - b_user = await backend.user.get_user( - organization_id=device.organization_id, user_id=device.user_id - ) - row = next(row for row in rows if row[1] == b_user.user_certificate) - user_ids.add(row[0]) - assert row[2] == b_user.revoked_user_certificate - assert len(user_ids) == 3 # Make sure all ids are unique - - # 4) device table - rows = con.execute("SELECT _id, device_certificate FROM device").fetchall() - # SQLite does dynamic typing, so better be careful - for row in rows: - assert isinstance(row[0], int) # _id - assert isinstance(row[1], bytes) # device_certificate - assert len(rows) == 4 # Contains alice@dev1, alice@dev2, bob@dev1 and adam@dev1 - device_ids = set() - alice_internal_id = None - for device in (alice, alice2, bob, adam): - _, b_device = await backend.user.get_user_with_device( - organization_id=device.organization_id, device_id=device.device_id - ) - row = next(row for row in rows if row[1] == b_device.device_certificate) - device_ids.add(row[0]) - if device is alice: - alice_internal_id = row[0] - assert len(device_ids) == 4 # Make sure all ids are unique - assert isinstance(alice_internal_id, int) # Sanity check - - # 5) block table - rows = con.execute("SELECT _id, block_id, data, author from block").fetchall() - assert rows == [ - (1, block1.bytes, b"block1", alice_internal_id), - (2, block2.bytes, b"block2", alice_internal_id), - ] - - # 5) vlob table - rows = con.execute( - "SELECT _id, vlob_id, version, blob, author, timestamp from vlob_atom" - ).fetchall() - assert rows == [ - (1, vlob1.bytes, 1, b"s1:vlob1v1", alice_internal_id, _sqlite_timestamp(2000, 1, 4)), - (2, vlob1.bytes, 2, b"s1:vlob1v2", alice_internal_id, _sqlite_timestamp(2000, 1, 5)), - (3, vlob2.bytes, 1, b"s1:vlob2v1", alice_internal_id, _sqlite_timestamp(2000, 1, 6)), - ] - - # Also check for idempotency with a different realm exporter - async with RealmExporter.run( - organization_id=coolorg.organization_id, - realm_id=realm1, - service_id=s1.service_id, - 
output_db_path=output_db_path, - input_dbh=backend.sequester.dbh, - input_blockstore=backend.blockstore, - ) as exporter: - await exporter.export_vlobs() - await exporter.export_blocks() - - # Exporting a different realm on the same database export should fail - realm2 = VlobID.new() - await backend.realm.create( - organization_id=coolorg.organization_id, - self_granted_role=RealmGrantedRole( - certificate=b"cert2", - realm_id=realm2, - user_id=alice.user_id, - role=RealmRole.OWNER, - granted_by=alice.device_id, - granted_on=DateTime.now(), - ), - ) - with pytest.raises(RealmExporterOutputDbError): - async with RealmExporter.run( - organization_id=coolorg.organization_id, - realm_id=realm2, - service_id=s1.service_id, - output_db_path=output_db_path, - input_dbh=backend.sequester.dbh, - input_blockstore=backend.blockstore, - ) as exporter: - pass - - # The export script can detect missing items and update the export - await backend.realm.update_roles( - organization_id=coolorg.organization_id, - new_role=RealmGrantedRole( - certificate=b"role_cert3", - realm_id=realm1, - user_id=bob.user_id, - role=None, - granted_by=alice.device_id, - granted_on=DateTime.now(), - ), - ) - await backend.vlob.update( - organization_id=coolorg.organization_id, - author=alice.device_id, - encryption_revision=1, - vlob_id=vlob1, - version=3, - timestamp=DateTime.now(), - blob=b"vlob1v3", - sequester_blob={s1.service_id: b"s1:vlob1v3"}, - ) - vlob3 = VlobID.new() - await backend.vlob.create( - organization_id=coolorg.organization_id, - author=alice.device_id, - realm_id=realm1, - encryption_revision=1, - vlob_id=vlob3, - timestamp=DateTime.now(), - blob=b"vlob3v1", - sequester_blob={s1.service_id: b"s1:vlob3v1"}, - ) - block3 = BlockID.new() - await backend.block.create( - organization_id=coolorg.organization_id, - author=alice.device_id, - block_id=block3, - realm_id=realm1, - block=b"block3", - ) - async with RealmExporter.run( - organization_id=coolorg.organization_id, - realm_id=realm1, - service_id=s1.service_id, - output_db_path=output_db_path, - input_dbh=backend.sequester.dbh, - input_blockstore=backend.blockstore, - ) as exporter: - # Export vlobs - to_export_count, vlob_batch_offset_marker0 = await exporter.compute_vlobs_export_status() - assert to_export_count == 5 - assert vlob_batch_offset_marker0 == 3 - vlob_batch_offset_marker1 = await exporter.export_vlobs( - batch_offset_marker=vlob_batch_offset_marker0 - ) - assert vlob_batch_offset_marker1 == 5 - - # Export blocks - to_export_count, block_batch_offset_marker0 = await exporter.compute_blocks_export_status() - assert to_export_count == 3 - assert block_batch_offset_marker0 == 2 - block_batch_offset_marker1 = await exporter.export_blocks( - batch_offset_marker=block_batch_offset_marker1 - ) - assert block_batch_offset_marker1 == 3 - - # Again, check exported database - con = sqlite3.connect(f"file:{output_db_path}?mode=ro", uri=True) - row = con.execute("SELECT magic, version, realm_id from info").fetchone() - assert row == (87947, 1, realm1.bytes) - rows = con.execute("SELECT block_id, data from block").fetchall() - assert rows == [(block1.bytes, b"block1"), (block2.bytes, b"block2"), (block3.bytes, b"block3")] - rows = con.execute("SELECT _id, vlob_id, version, blob from vlob_atom").fetchall() - assert rows == [ - (1, vlob1.bytes, 1, b"s1:vlob1v1"), - (2, vlob1.bytes, 2, b"s1:vlob1v2"), - (3, vlob2.bytes, 1, b"s1:vlob2v1"), - (4, vlob1.bytes, 3, b"s1:vlob1v3"), - (5, vlob3.bytes, 1, b"s1:vlob3v1"), - ] - row = con.execute("SELECT count(*) from 
realm_role").fetchone() - assert row[0] == 3 # Contains alice's OWNER role and bob MANAGER&None roles on realm1 - row = con.execute("SELECT count(*) from user_").fetchone() - assert row[0] == 3 # Contains alice, bob and adam - row = con.execute("SELECT count(*) from device").fetchone() - assert row[0] == 4 # Contains alice@dev1, alice@dev2, bob@dev1 and adam@dev1 - - # Bonus points: check errors handling on invalid input - - default_args = { - "organization_id": coolorg.organization_id, - "realm_id": realm1, - "service_id": s1.service_id, - "output_db_path": output_db_path, - "input_dbh": backend.sequester.dbh, - "input_blockstore": backend.blockstore, - } - # Unknown organization - dummy_org = OrganizationID("Dummy") - with pytest.raises(RealmExporterInputError): - async with RealmExporter.run(**{**default_args, "organization_id": dummy_org}): - pass - # Not bootstrapped organization - await backend.organization.create(id=dummy_org, bootstrap_token="") - with pytest.raises(RealmExporterInputError): - async with RealmExporter.run(**{**default_args, "organization_id": dummy_org}): - pass - # Unknown realm - with pytest.raises(RealmExporterInputError): - async with RealmExporter.run(**{**default_args, "realm_id": VlobID.new()}): - pass - # Unknown sequester service - with pytest.raises(RealmExporterInputError): - async with RealmExporter.run(**{**default_args, "service_id": SequesterServiceID.new()}): - pass - # Non sequestered organization - with pytest.raises(RealmExporterInputError): - async with RealmExporter.run( - **{**default_args, "organization_id": other_org.organization_id} - ): - pass - - -@pytest.mark.trio -async def test_export_reader_full_run(tmp_path, coolorg: OrganizationFullData, alice, bob, adam): - output_db_path = tmp_path / "export.sqlite" - realm1 = VlobID.new() - # Don't use such a small key size in real world, this is only for test ! 
- # (RSA key generation gets ~10x slower between 1024 and 4096) - service_decryption_key, service_encryption_key = SequesterPrivateKeyDer.generate_pair(1024) - - # Generate the export db by hand here - con = sqlite3.connect(output_db_path) - con.executescript(OUTPUT_DB_INIT_QUERY) - - con.execute( - "INSERT INTO info (realm_id, root_verify_key) VALUES (?, ?)", - (realm1.bytes, coolorg.root_verify_key.encode()), - ) - - # Timeline: - # - # 2000-01-01: Orga bootstrapped by Alice - # 2000-01-02: Bob created by Alice - # 2000-01-03: Adam created by Alice - # - # 2000-02-01: Realm created by Alice - # 2000-02-02: Bob got READER Realm access from Alice - # 2000-02-03: Bob got MANAGER Realm access from Alice - # 2000-02-04: Adam got CONTRIBUTOR Realm access from Bob - # - # 2000-03-01: Alice uploads workspace manifest v1 (no children) - # 2000-03-10: Bob uploads /file1's manifest v1 (empty file) - # 2000-03-11: Bob uploads /folder1's manifest v1 (no children) - # 2000-03-12: Bob uploads /folder2's manifest v1 (no children) - # 2000-03-13: Bob uploads workspace manifest v2 (containing file1 and folder1&2) - # 2000-03-20: Adam uploads /folder2/file2's manifest v1 (empty file) - # 2000-03-21: Adam uploads /folder2/folder3's manifest v1 (no children) - # 2000-03-22: Adam uploads /folder2's manifest v2 (containing file2 and folder3) - # 2000-03-23: Adam uploads file2block1 - # 2000-03-24: Adam uploads file2block2 - # 2000-03-25: Adam uploads /folder2/file2's manifest v2 (containing file2block1&2) - # - # 2000-04-01: Adam's Realm access removed by Alice - # 2000-05-01: Adam revoked by Alice - - # Populate `user_` table - alice_user_certif = UserCertificate( - user_id=alice.user_id, - profile=UserProfile.ADMIN, - human_handle=alice.human_handle, - public_key=alice.public_key, - author=None, - timestamp=DateTime(2000, 1, 1), - ).dump_and_sign(coolorg.root_signing_key) - bob_user_certif = UserCertificate( - user_id=bob.user_id, - profile=UserProfile.STANDARD, - human_handle=bob.human_handle, - public_key=bob.public_key, - author=alice.device_id, - timestamp=DateTime(2000, 1, 2), - ).dump_and_sign(alice.signing_key) - adam_user_certif = UserCertificate( - user_id=adam.user_id, - profile=UserProfile.STANDARD, - human_handle=adam.human_handle, - public_key=adam.public_key, - author=alice.device_id, - timestamp=DateTime(2000, 1, 3), - ).dump_and_sign(alice.signing_key) - adam_revoked_user_certif = RevokedUserCertificate( - user_id=adam.user_id, author=alice.device_id, timestamp=DateTime(2000, 5, 1) - ).dump_and_sign(alice.signing_key) - con.executemany( - "INSERT INTO user_(_id, user_certificate, revoked_user_certificate) VALUES (?, ?, ?)", - [ - (1, alice_user_certif, None), - (2, bob_user_certif, None), - (3, adam_user_certif, adam_revoked_user_certif), - ], - ) - - # Populate `device` table - alice_device_certif = DeviceCertificate( - device_id=alice.device_id, - device_label=alice.device_label, - verify_key=alice.verify_key, - author=None, - timestamp=DateTime(2000, 1, 1), - ).dump_and_sign(coolorg.root_signing_key) - bob_device_certif = DeviceCertificate( - device_id=bob.device_id, - device_label=bob.device_label, - verify_key=bob.verify_key, - author=alice.device_id, - timestamp=DateTime(2000, 1, 2), - ).dump_and_sign(alice.signing_key) - adam_device_certif = DeviceCertificate( - device_id=adam.device_id, - device_label=adam.device_label, - verify_key=adam.verify_key, - author=alice.device_id, - timestamp=DateTime(2000, 1, 3), - ).dump_and_sign(alice.signing_key) - alice_device_internal_id = 1 - 
bob_device_internal_id = 2 - adam_device_internal_id = 3 - con.executemany( - "INSERT INTO device(_id, device_certificate) VALUES (?, ?)", - [ - (alice_device_internal_id, alice_device_certif), - (bob_device_internal_id, bob_device_certif), - (adam_device_internal_id, adam_device_certif), - ], - ) - - # Populate `realm_role` table - realm_roles = [ - RealmRoleCertificate( - realm_id=realm1, - user_id=alice.user_id, - role=RealmRole.OWNER, - timestamp=DateTime(2000, 2, 1), - author=None, - ).dump_and_sign(coolorg.root_signing_key), - RealmRoleCertificate( - realm_id=realm1, - user_id=bob.user_id, - role=RealmRole.READER, - timestamp=DateTime(2000, 2, 2), - author=alice.device_id, - ).dump_and_sign(alice.signing_key), - RealmRoleCertificate( - realm_id=realm1, - user_id=bob.user_id, - role=RealmRole.MANAGER, - timestamp=DateTime(2000, 2, 3), - author=alice.device_id, - ).dump_and_sign(alice.signing_key), - RealmRoleCertificate( - realm_id=realm1, - user_id=adam.user_id, - role=RealmRole.CONTRIBUTOR, - timestamp=DateTime(2000, 2, 4), - author=bob.device_id, - ).dump_and_sign(bob.signing_key), - RealmRoleCertificate( - realm_id=realm1, - user_id=adam.user_id, - role=None, - timestamp=DateTime(2000, 4, 1), - author=alice.device_id, - ).dump_and_sign(alice.signing_key), - ] - con.executemany( - "INSERT INTO realm_role(_id, role_certificate) VALUES (?, ?)", enumerate(realm_roles) - ) - - # Populate `block` table - file2block1 = BlockID.new() - file2block1_key = SecretKey.generate() - file2block1_data = file2block1_key.encrypt(b"a" * 10) - file2block1_digest = HashDigest.from_data(b"a" * 10) - file2block2 = BlockID.new() - file2block2_key = SecretKey.generate() - file2block2_data = file2block2_key.encrypt(b"b" * 10) - file2block2_digest = HashDigest.from_data(b"b" * 10) - - blocks = [ - (1, file2block1.bytes, file2block1_data, alice_device_internal_id), - (2, file2block2.bytes, file2block2_data, alice_device_internal_id), - ] - con.executemany("INSERT INTO block(_id, block_id, data, author) VALUES (?, ?, ?, ?)", blocks) - - # Populate `vlob` table - workspace_id = realm1 - file1 = VlobID.new() - file2 = VlobID.new() - folder1 = VlobID.new() - folder2 = VlobID.new() - folder3 = VlobID.new() - workspace_manifest_v1 = WorkspaceManifest( - author=alice.device_id, - timestamp=DateTime(2000, 3, 1), - version=1, - id=workspace_id, - created=DateTime(2000, 3, 1), - updated=DateTime(2000, 3, 1), - children={}, - ).dump_and_sign(author_signkey=alice.signing_key) - file1_manifest_v1 = FileManifest( - author=bob.device_id, - timestamp=DateTime(2000, 3, 10), - version=1, - id=file1, - parent=workspace_id, - created=DateTime(2000, 3, 10), - updated=DateTime(2000, 3, 10), - size=0, - blocksize=10, - blocks=[], - ).dump_and_sign(author_signkey=bob.signing_key) - folder1_manifest_v1 = FolderManifest( - author=bob.device_id, - timestamp=DateTime(2000, 3, 11), - version=1, - id=folder1, - parent=workspace_id, - created=DateTime(2000, 3, 11), - updated=DateTime(2000, 3, 11), - children={}, - ).dump_and_sign(author_signkey=bob.signing_key) - folder2_manifest_v1 = FolderManifest( - author=bob.device_id, - timestamp=DateTime(2000, 3, 12), - version=1, - id=folder2, - parent=workspace_id, - created=DateTime(2000, 3, 12), - updated=DateTime(2000, 3, 12), - children={}, - ).dump_and_sign(author_signkey=bob.signing_key) - workspace_manifest_v2 = WorkspaceManifest( - author=bob.device_id, - timestamp=DateTime(2000, 3, 13), - version=2, - id=workspace_id, - created=DateTime(2000, 3, 13), - updated=DateTime(2000, 3, 13), - 
children={ - EntryName("file1"): file1, - EntryName("folder1"): folder1, - EntryName("folder2"): folder2, - }, - ).dump_and_sign(author_signkey=bob.signing_key) - file2_manifest_v1 = FileManifest( - author=adam.device_id, - timestamp=DateTime(2000, 3, 20), - version=1, - id=file2, - parent=folder2, - created=DateTime(2000, 3, 20), - updated=DateTime(2000, 3, 20), - size=0, - blocksize=10, - blocks=[], - ).dump_and_sign(author_signkey=adam.signing_key) - folder3_manifest_v1 = FolderManifest( - author=adam.device_id, - timestamp=DateTime(2000, 3, 21), - version=1, - id=folder3, - parent=workspace_id, - created=DateTime(2000, 3, 21), - updated=DateTime(2000, 3, 21), - children={}, - ).dump_and_sign(author_signkey=adam.signing_key) - folder2_manifest_v2 = FolderManifest( - author=adam.device_id, - timestamp=DateTime(2000, 3, 22), - version=2, - id=folder2, - parent=workspace_id, - created=DateTime(2000, 3, 22), - updated=DateTime(2000, 3, 22), - children={EntryName("file2"): file2, EntryName("folder3"): folder3}, - ).dump_and_sign(author_signkey=adam.signing_key) - file2_manifest_v2 = FileManifest( - author=adam.device_id, - timestamp=DateTime(2000, 3, 25), - version=2, - id=file2, - parent=folder2, - created=DateTime(2000, 3, 25), - updated=DateTime(2000, 3, 25), - size=20, - blocksize=10, - blocks=[ - BlockAccess( - id=file2block1, key=file2block1_key, offset=0, size=10, digest=file2block1_digest - ), - BlockAccess( - id=file2block2, key=file2block2_key, offset=10, size=10, digest=file2block2_digest - ), - ], - ).dump_and_sign(author_signkey=adam.signing_key) - - def _sqlite_ts(year, month, day): - return int(DateTime(year, month, day).timestamp() * 1000000) - - vlob_atoms = [ - # / v1 - ( - 1, - workspace_id.bytes, - 1, - service_encryption_key.encrypt(workspace_manifest_v1), - alice_device_internal_id, - _sqlite_ts(2000, 3, 1), - ), - # /file1 v1 - ( - 2, - file1.bytes, - 1, - service_encryption_key.encrypt(file1_manifest_v1), - bob_device_internal_id, - _sqlite_ts(2000, 3, 10), - ), - # /folder1 v1 - ( - 3, - folder1.bytes, - 1, - service_encryption_key.encrypt(folder1_manifest_v1), - bob_device_internal_id, - _sqlite_ts(2000, 3, 11), - ), - # /folder2 v1 - ( - 4, - folder2.bytes, - 1, - service_encryption_key.encrypt(folder2_manifest_v1), - bob_device_internal_id, - _sqlite_ts(2000, 3, 12), - ), - # / v2 - ( - 5, - workspace_id.bytes, - 2, - service_encryption_key.encrypt(workspace_manifest_v2), - bob_device_internal_id, - _sqlite_ts(2000, 3, 13), - ), - # /folder2/file2 v1 - ( - 6, - file2.bytes, - 1, - service_encryption_key.encrypt(file2_manifest_v1), - adam_device_internal_id, - _sqlite_ts(2000, 3, 20), - ), - # /folder2/folder3 v1 - ( - 7, - folder3.bytes, - 1, - service_encryption_key.encrypt(folder3_manifest_v1), - adam_device_internal_id, - _sqlite_ts(2000, 3, 21), - ), - # /folder2 v2 - ( - 8, - folder2.bytes, - 2, - service_encryption_key.encrypt(folder2_manifest_v2), - adam_device_internal_id, - _sqlite_ts(2000, 3, 22), - ), - # /folder2/file2 v2 - ( - 9, - file2.bytes, - 2, - service_encryption_key.encrypt(file2_manifest_v2), - adam_device_internal_id, - _sqlite_ts(2000, 3, 25), - ), - ] - con.executemany( - "INSERT INTO vlob_atom(_id, vlob_id, version, blob, author, timestamp) VALUES (?, ?, ?, ?, ?, ?)", - vlob_atoms, - ) - - con.commit() - con.close() - - # Finally do the actual export \o/ - - dump_path = tmp_path / "extract_dump" - list( - extract_workspace( - output=dump_path, - export_db=output_db_path, - decryption_key=service_decryption_key, - 
filter_on_date=DateTime.now(), - ) - ) - - # Check the result - assert {x.name for x in dump_path.iterdir()} == {"file1", "folder1", "folder2"} - assert (dump_path / "file1").read_bytes() == b"" - assert {x.name for x in (dump_path / "folder1").iterdir()} == set() - assert {x.name for x in (dump_path / "folder2").iterdir()} == {"file2", "folder3"} - assert (dump_path / "folder2/file2").read_bytes() == b"a" * 10 + b"b" * 10 - assert {x.name for x in (dump_path / "folder2/folder3").iterdir()} == set() - - # Extract dump at 2000-03-14, where folder2 was empty - dump_path_ts = tmp_path / "extract_dump_ts" - list( - extract_workspace( - output=dump_path_ts, - export_db=output_db_path, - decryption_key=service_decryption_key, - filter_on_date=DateTime(2000, 3, 14), - ) - ) - # Check the result - assert {x.name for x in dump_path_ts.iterdir()} == {"file1", "folder1", "folder2"} - assert (dump_path_ts / "file1").read_bytes() == b"" - assert {x.name for x in (dump_path_ts / "folder1").iterdir()} == set() - assert {x.name for x in (dump_path_ts / "folder2").iterdir()} == set() diff --git a/server/tests/backend/sequester/test_service.py b/server/tests/backend/sequester/test_service.py deleted file mode 100644 index 4c46a436263..00000000000 --- a/server/tests/backend/sequester/test_service.py +++ /dev/null @@ -1,233 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -import pytest - -from parsec._parsec import DateTime -from parsec.api.protocol import OrganizationID, SequesterServiceID -from parsec.backend.sequester import ( - SequesterCertificateValidationError, - SequesterDisabledError, - SequesterOrganizationNotFoundError, - SequesterServiceAlreadyDisabledError, - SequesterServiceAlreadyExists, - SequesterServiceNotFoundError, - SequesterServiceType, -) -from tests.common import ( - OrganizationFullData, - customize_fixtures, - sequester_authority_factory, - sequester_service_factory, -) - -# Sequester service modification is not exposed as a command API, so we only -# test the internal component API - - -@pytest.mark.trio -@customize_fixtures(coolorg_is_sequestered_organization=True) -async def test_create_disable_services( - coolorg: OrganizationFullData, other_org: OrganizationFullData, backend -): - service = sequester_service_factory("Test Service", coolorg.sequester_authority) - - # 1) Service creation - - # Unknown organization ID - with pytest.raises(SequesterOrganizationNotFoundError): - await backend.sequester.create_service( - organization_id=OrganizationID("DummyOrg"), service=service.backend_service - ) - - # Try to create service in a non-sequestered organization - with pytest.raises(SequesterDisabledError): - await backend.sequester.create_service( - organization_id=other_org.organization_id, service=service.backend_service - ) - - # Invalid service certificate - with pytest.raises(SequesterCertificateValidationError): - await backend.sequester.create_service( - organization_id=coolorg.organization_id, - service=service.backend_service.evolve(service_certificate=b""), - ) - - # Valid service certificate, but with invalid signature - bad_authority = sequester_authority_factory(coolorg.root_signing_key) - service_signed_by_bad_authority = sequester_service_factory("Test Service", bad_authority) - with pytest.raises(SequesterCertificateValidationError): - await backend.sequester.create_service( - organization_id=coolorg.organization_id, - service=service_signed_by_bad_authority.backend_service, - ) - - # 
Service certificate timestamps out of ballpark are allowed! - service_very_old_timestamp = sequester_service_factory( - "sequester_service_1", coolorg.sequester_authority, timestamp=DateTime(2000, 1, 1) - ) - await backend.sequester.create_service( - organization_id=coolorg.organization_id, service=service_very_old_timestamp.backend_service - ) - - # Successful service creation - await backend.sequester.create_service( - organization_id=coolorg.organization_id, service=service.backend_service - ) - - # Service ID already exists - with pytest.raises(SequesterServiceAlreadyExists): - await backend.sequester.create_service( - organization_id=coolorg.organization_id, service=service.backend_service - ) - - # Retrieve service list - services = await backend.sequester.get_organization_services( - organization_id=coolorg.organization_id - ) - assert services == [service_very_old_timestamp.backend_service, service.backend_service] - - # Cannot retrieve the service list on a non-sequestered organization - with pytest.raises(SequesterDisabledError): - services = await backend.sequester.get_organization_services( - organization_id=other_org.organization_id - ) - - # 2) Disable service - disabled_on = DateTime.now() - - # Unknown organization ID - with pytest.raises(SequesterOrganizationNotFoundError): - await backend.sequester.disable_service( - organization_id=OrganizationID("DummyOrg"), - service_id=service.service_id, - disabled_on=disabled_on, - ) - - # Unknown sequester service - with pytest.raises(SequesterServiceNotFoundError): - await backend.sequester.disable_service( - organization_id=coolorg.organization_id, - service_id=SequesterServiceID.new(), - disabled_on=disabled_on, - ) - - # Try to disable in a non-sequestered organization - with pytest.raises(SequesterDisabledError): - await backend.sequester.disable_service( - organization_id=other_org.organization_id, - service_id=service.service_id, - disabled_on=disabled_on, - ) - - # Successful disable - await backend.sequester.disable_service( - organization_id=coolorg.organization_id, - service_id=service.service_id, - disabled_on=disabled_on, - ) - - # Already disabled - with pytest.raises(SequesterServiceAlreadyDisabledError): - await backend.sequester.disable_service( - organization_id=coolorg.organization_id, - service_id=service.service_id, - disabled_on=disabled_on, - ) - - # Retrieve service list - services = await backend.sequester.get_organization_services( - organization_id=coolorg.organization_id - ) - assert services == [ - service_very_old_timestamp.backend_service, - service.backend_service.evolve(disabled_on=disabled_on), - ] - - # 3) Bonus: Create after disabling - - # Cannot recreate a disabled service - with pytest.raises(SequesterServiceAlreadyExists): - await backend.sequester.create_service( - organization_id=coolorg.organization_id, service=service.backend_service - ) - - # Successful service creation - backend_service2 = service.backend_service.evolve(service_id=SequesterServiceID.new()) - await backend.sequester.create_service( - organization_id=coolorg.organization_id, service=backend_service2 - ) - - # 4) Bonus: list services - - # Retrieve service list - services = await backend.sequester.get_organization_services( - organization_id=coolorg.organization_id - ) - expected_backend_service1 = service.backend_service.evolve(disabled_on=disabled_on) - assert services == [ - service_very_old_timestamp.backend_service, - expected_backend_service1, - backend_service2, - ] - - # Retrieve single service - retrieved_backend_service1 = 
await backend.sequester.get_service( - organization_id=coolorg.organization_id, service_id=service.service_id - ) - assert retrieved_backend_service1 == expected_backend_service1 - - # Retrieve single service with an unknown organization ID - with pytest.raises(SequesterOrganizationNotFoundError): - await backend.sequester.get_service( - organization_id=OrganizationID("DummyOrg"), service_id=service.service_id - ) - - # Retrieve service list with an unknown organization ID - with pytest.raises(SequesterOrganizationNotFoundError): - await backend.sequester.get_organization_services( - organization_id=OrganizationID("DummyOrg") - ) - - # Unknown sequester service - with pytest.raises(SequesterServiceNotFoundError): - await backend.sequester.get_service( - organization_id=coolorg.organization_id, service_id=SequesterServiceID.new() - ) - - # Try to retrieve the service list in a non-sequestered organization - with pytest.raises(SequesterDisabledError): - await backend.sequester.get_organization_services(organization_id=other_org.organization_id) - - # Try to retrieve a single service in a non-sequestered organization - with pytest.raises(SequesterDisabledError): - await backend.sequester.get_service( - organization_id=other_org.organization_id, service_id=service.service_id - ) - - # 5) Bonus: webhook service - webhook_service = sequester_service_factory( - "TestWebhookService", - coolorg.sequester_authority, - service_type=SequesterServiceType.WEBHOOK, - webhook_url="http://somewhere.post", - ) - await backend.sequester.create_service( - organization_id=coolorg.organization_id, service=webhook_service.backend_service - ) - services = await backend.sequester.get_organization_services( - organization_id=coolorg.organization_id - ) - assert services == [ - service_very_old_timestamp.backend_service, - expected_backend_service1, - backend_service2, - webhook_service.backend_service, - ] - - # 6) Bonus: service creation in an expired organization is allowed - await backend.organization.update(id=coolorg.organization_id, is_expired=True) - backend_service3 = service.backend_service.evolve(service_id=SequesterServiceID.new()) - await backend.sequester.create_service( - organization_id=coolorg.organization_id, service=backend_service3 - ) diff --git a/server/tests/backend/sequester/test_vlob.py b/server/tests/backend/sequester/test_vlob.py deleted file mode 100644 index c131c56a28d..00000000000 --- a/server/tests/backend/sequester/test_vlob.py +++ /dev/null @@ -1,511 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -import json -import urllib.error -from unittest.mock import Mock, patch -from urllib.parse import parse_qs, urlsplit - -import pytest - -from parsec.api.protocol import ( - OrganizationID, - SequesterServiceID, - VlobCreateRepOk, - VlobCreateRepRejectedBySequesterService, - VlobCreateRepSequesterInconsistency, - VlobCreateRepTimeout, - VlobID, - VlobUpdateRepOk, - VlobUpdateRepRejectedBySequesterService, - VlobUpdateRepSequesterInconsistency, - VlobUpdateRepTimeout, -) -from parsec.backend.sequester import ( - SequesterOrganizationNotFoundError, - SequesterServiceNotFoundError, - SequesterServiceType, -) -from tests.backend.common import vlob_create, vlob_update -from tests.common import OrganizationFullData, customize_fixtures, sequester_service_factory - - -@customize_fixtures(coolorg_is_sequestered_organization=True) -@pytest.mark.trio -async def test_vlob_create_update_and_sequester_access( - coolorg: OrganizationFullData, 
alice_ws, realm, backend - ): - # s1&s2 are valid sequester services, s3 is a disabled sequester service - s1 = sequester_service_factory( - authority=coolorg.sequester_authority, label="Sequester service 1" - ) - await backend.sequester.create_service( - organization_id=coolorg.organization_id, service=s1.backend_service - ) - s2 = sequester_service_factory( - authority=coolorg.sequester_authority, label="Sequester service 2" - ) - await backend.sequester.create_service( - organization_id=coolorg.organization_id, service=s2.backend_service - ) - s3 = sequester_service_factory( - authority=coolorg.sequester_authority, label="Sequester service 3" - ) - await backend.sequester.create_service( - organization_id=coolorg.organization_id, service=s3.backend_service - ) - await backend.sequester.disable_service( - organization_id=coolorg.organization_id, service_id=s3.service_id - ) - - # Webhook-specific errors are tested in another test - s4 = sequester_service_factory( - authority=coolorg.sequester_authority, - label="Sequester webhook service", - service_type=SequesterServiceType.WEBHOOK, - webhook_url="http://somewhere.post", - ) - await backend.sequester.create_service( - organization_id=coolorg.organization_id, service=s4.backend_service - ) - - vlob_id = VlobID.from_hex("00000000000000000000000000000001") - dummy_service_id = SequesterServiceID.from_hex("0000000000000000000000000000000A") - blob = b"" - vlob_version = 0 - - async def _test(vlob_cmd, **cmd_kwargs): - nonlocal vlob_version - vlob_version += 1 - b1 = f"".encode() - b2 = f"".encode() - b3 = f"".encode() - b4 = f"".encode() - - # 1) Try without sequester blob - rep = await vlob_cmd(alice_ws, **cmd_kwargs, check_rep=False) - - # vlob_cmd can be either create or update, so check against both rep types - assert isinstance( - rep, (VlobCreateRepSequesterInconsistency, VlobUpdateRepSequesterInconsistency) - ) - assert rep.sequester_authority_certificate == coolorg.sequester_authority.certif - assert rep.sequester_services_certificates == [s1.certif, s2.certif, s4.certif] - - # 2) Try with sequester blob missing for one service - rep = await vlob_cmd( - alice_ws, **cmd_kwargs, sequester_blob={s1.service_id: b1}, check_rep=False - ) - - assert isinstance( - rep, (VlobCreateRepSequesterInconsistency, VlobUpdateRepSequesterInconsistency) - ) - assert rep.sequester_authority_certificate == coolorg.sequester_authority.certif - assert rep.sequester_services_certificates == [s1.certif, s2.certif, s4.certif] - - # 3) Try with unknown additional sequester blob - rep = await vlob_cmd( - alice_ws, - **cmd_kwargs, - sequester_blob={s1.service_id: b1, s2.service_id: b2, dummy_service_id: b""}, - check_rep=False, - ) - - assert isinstance( - rep, (VlobCreateRepSequesterInconsistency, VlobUpdateRepSequesterInconsistency) - ) - assert rep.sequester_authority_certificate == coolorg.sequester_authority.certif - assert rep.sequester_services_certificates == [s1.certif, s2.certif, s4.certif] - - # 4) Try with a blob for a disabled sequester service - rep = await vlob_cmd( - alice_ws, - **cmd_kwargs, - sequester_blob={ - s1.service_id: b1, - s2.service_id: b2, - s3.service_id: b3, - s4.service_id: b4, - }, - check_rep=False, - ) - - assert isinstance( - rep, (VlobCreateRepSequesterInconsistency, VlobUpdateRepSequesterInconsistency) - ) - assert rep.sequester_authority_certificate == coolorg.sequester_authority.certif - assert rep.sequester_services_certificates == [s1.certif, s2.certif, s4.certif] - - # 5) Finally the valid operation - rep = await vlob_cmd( - alice_ws, - 
**cmd_kwargs, - sequester_blob={s1.service_id: b1, s2.service_id: b2, s4.service_id: b4}, - check_rep=False, - ) - assert isinstance(rep, (VlobCreateRepOk, VlobUpdateRepOk)) - - # First, test vlob create & update - - with patch("parsec.backend.http_utils.urllib.request") as mock: - await _test(vlob_create, realm_id=realm, vlob_id=vlob_id, blob=blob) - await _test(vlob_update, vlob_id=vlob_id, version=2, blob=blob) - mock.Request.assert_called() - - # Then test vlob access from sequester services - - # 1) Test services 1 & 2 - - realm_s1_dump = await backend.sequester.dump_realm( - organization_id=coolorg.organization_id, service_id=s1.service_id, realm_id=realm - ) - assert realm_s1_dump == [(vlob_id, 1, b""), (vlob_id, 2, b"")] - realm_s2_dump = await backend.sequester.dump_realm( - organization_id=coolorg.organization_id, service_id=s2.service_id, realm_id=realm - ) - assert realm_s2_dump == [(vlob_id, 1, b""), (vlob_id, 2, b"")] - - # 2) Ensure service 3 is empty - - realm_s3_dump = await backend.sequester.dump_realm( - organization_id=coolorg.organization_id, service_id=s3.service_id, realm_id=realm - ) - assert realm_s3_dump == [] - - # 3) Test various errors in sequester vlob access - - # Unknown organization - with pytest.raises(SequesterOrganizationNotFoundError): - await backend.sequester.dump_realm( - organization_id=OrganizationID("DummyOrg"), service_id=s1.service_id, realm_id=realm - ) - - # Unknown sequester service - with pytest.raises(SequesterServiceNotFoundError): - await backend.sequester.dump_realm( - organization_id=coolorg.organization_id, service_id=dummy_service_id, realm_id=realm - ) - - -async def _register_service_and_create_vlob( - coolorg, backend, alice_ws, realm, vlob_id, blob, sequester_blob, url - ): - # Register webhook service - service = sequester_service_factory( - "TestWebhookService", - coolorg.sequester_authority, - service_type=SequesterServiceType.WEBHOOK, - webhook_url=url, - ) - await backend.sequester.create_service( - organization_id=coolorg.organization_id, service=service.backend_service - ) - - # Create one vlob - rep = await vlob_create( - alice_ws, - realm_id=realm, - vlob_id=vlob_id, - blob=blob, - sequester_blob={service.service_id: sequester_blob}, - check_rep=False, - ) - - assert isinstance(rep, VlobCreateRepOk) - - return service - - -@customize_fixtures(coolorg_is_sequestered_organization=True) -@pytest.mark.trio -async def test_webhook_vlob_create_update( - coolorg: OrganizationFullData, alice, alice_ws, realm, backend - ): - vlob_id = VlobID.from_hex("00000000000000000000000000000001") - blob = b"" - sequester_blob = b"" - - url = "http://somewhere.post" - - with patch("parsec.backend.http_utils.urllib.request") as mock: - # Register webhook service - service = await _register_service_and_create_vlob( - coolorg, backend, alice_ws, realm, vlob_id, blob, sequester_blob, url - ) - - # Helper - def _assert_webhook_posted(expected_sequester_data): - mock.Request.assert_called_once() - # Extract args - args, kwargs = mock.Request.call_args - assert args[0].startswith(url) - assert kwargs["method"] == "POST" - # Extract URL params - params = parse_qs(urlsplit(args[0]).query) - assert coolorg.organization_id.str == params["organization_id"][0] - assert service.service_id.hex == params["service_id"][0] - # Extract HTTP data - assert expected_sequester_data == kwargs["data"] - # Reset - mock.reset_mock() - - # Vlob has been created, assert that the data has been posted - _assert_webhook_posted(sequester_blob) - - sequester_blob = b"" - rep = await 
vlob_update( - alice_ws, - vlob_id=vlob_id, - version=2, - blob=blob, - sequester_blob={service.service_id: sequester_blob}, - check_rep=False, - ) - - assert isinstance(rep, VlobUpdateRepOk) - _assert_webhook_posted(sequester_blob) - - -@customize_fixtures(coolorg_is_sequestered_organization=True) -@pytest.mark.trio -async def test_webhook_errors(caplog, coolorg: OrganizationFullData, alice_ws, realm, backend): - vlob_id = VlobID.from_hex("00000000000000000000000000000001") - blob = b"" - sequester_blob = b"" - - url = "http://somewhere.post" - - with patch("parsec.backend.http_utils.urllib.request") as mock: - service = await _register_service_and_create_vlob( - coolorg, backend, alice_ws, realm, vlob_id, blob, sequester_blob, url - ) - - new_vlob_id = VlobID.from_hex("00000000000000000000000000000002") - - # Test URLError - def raise_urlerror(*args, **kwargs): - raise urllib.error.URLError(reason="CONNECTION REFUSED") - - mock.urlopen.side_effect = raise_urlerror - rep = await vlob_create( - alice_ws, - vlob_id=new_vlob_id, - realm_id=realm, - blob=blob, - sequester_blob={service.service_id: sequester_blob}, - check_rep=False, - ) - assert isinstance(rep, VlobCreateRepTimeout) - caplog.assert_occurred_once( - f"[warning ] Cannot reach webhook server [parsec.backend.vlob] service_id={service.service_id.hex} service_label=TestWebhookService" - ) - caplog.clear() - - rep = await vlob_update( - alice_ws, - vlob_id=vlob_id, - version=2, - blob=blob, - sequester_blob={service.service_id: sequester_blob}, - check_rep=False, - ) - assert isinstance(rep, VlobUpdateRepTimeout) - caplog.assert_occurred_once( - f"[warning ] Cannot reach webhook server [parsec.backend.vlob] service_id={service.service_id.hex} service_label=TestWebhookService" - ) - caplog.clear() - - # Test HTTPError - def raise_httperror(*args, **kwargs): - fp = Mock() - raise urllib.error.HTTPError(url, 405, "METHOD NOT ALLOWED", None, fp) - - mock.urlopen.side_effect = raise_httperror - rep = await vlob_create( - alice_ws, - vlob_id=new_vlob_id, - realm_id=realm, - blob=blob, - sequester_blob={service.service_id: sequester_blob}, - check_rep=False, - ) - assert isinstance(rep, VlobCreateRepTimeout) - caplog.assert_occurred_once( - f"[warning ] Invalid HTTP status returned by webhook [parsec.backend.vlob] service_id={service.service_id.hex} service_label=TestWebhookService status=405" - ) - caplog.clear() - - rep = await vlob_update( - alice_ws, - vlob_id=vlob_id, - version=2, - blob=blob, - sequester_blob={service.service_id: sequester_blob}, - check_rep=False, - ) - assert isinstance(rep, VlobUpdateRepTimeout) - caplog.assert_occurred_once( - f"[warning ] Invalid HTTP status returned by webhook [parsec.backend.vlob] service_id={service.service_id.hex} service_label=TestWebhookService status=405" - ) - caplog.clear() - - # Test error from service - - def raise_httperror_400(*args, **kwargs): - fp = Mock() - fp.read.return_value = json.dumps({"reason": "some_error_from_service"}) - raise urllib.error.HTTPError(url, 400, "", None, fp) - - mock.urlopen.side_effect = raise_httperror_400 - rep = await vlob_create( - alice_ws, - vlob_id=new_vlob_id, - realm_id=realm, - blob=blob, - sequester_blob={service.service_id: sequester_blob}, - check_rep=False, - ) - caplog.assert_not_occurred("warning") - assert isinstance(rep, VlobCreateRepRejectedBySequesterService) - assert rep.service_label == service.backend_service.service_label - assert rep.service_id == service.service_id - assert rep.reason == "some_error_from_service" - - rep = 
await vlob_update( - alice_ws, - vlob_id=vlob_id, - version=2, - blob=blob, - sequester_blob={service.service_id: sequester_blob}, - check_rep=False, - ) - caplog.assert_not_occurred("warning") - assert isinstance(rep, VlobUpdateRepRejectedBySequesterService) - assert rep.service_label == service.backend_service.service_label - assert rep.service_id == service.service_id - assert rep.reason == "some_error_from_service" - - # Test json error - - def raise_json_error_400(*args, **kwargs): - fp = Mock() - fp.read.return_value = b"not a json" - raise urllib.error.HTTPError(url, 400, "", None, fp) - - mock.urlopen.side_effect = raise_json_error_400 - rep = await vlob_create( - alice_ws, - vlob_id=new_vlob_id, - realm_id=realm, - blob=blob, - sequester_blob={service.service_id: sequester_blob}, - check_rep=False, - ) - caplog.assert_occurred_once( - f"[warning ] Invalid rejection reason body returned by webhook [parsec.backend.vlob] body=b'not a json' service_id={service.service_id.hex} service_label=TestWebhookService" - ) - caplog.clear() - assert isinstance(rep, VlobCreateRepRejectedBySequesterService) - assert rep.service_label == service.backend_service.service_label - assert rep.service_id == service.service_id - assert rep.reason == "File rejected (no reason)" - - rep = await vlob_update( - alice_ws, - vlob_id=vlob_id, - version=2, - blob=blob, - sequester_blob={service.service_id: sequester_blob}, - check_rep=False, - ) - caplog.assert_occurred_once( - f"[warning ] Invalid rejection reason body returned by webhook [parsec.backend.vlob] body=b'not a json' service_id={service.service_id.hex} service_label=TestWebhookService" - ) - caplog.clear() - assert isinstance(rep, VlobUpdateRepRejectedBySequesterService) - assert rep.service_label == service.backend_service.service_label - assert rep.service_id == service.service_id - assert rep.reason == "File rejected (no reason)" - - -@customize_fixtures(coolorg_is_sequestered_organization=True) -@pytest.mark.trio -async def test_sequester_dump_realm( - coolorg: OrganizationFullData, alice_ws, bob_ws, realm, backend -): - vlob_id = VlobID.from_hex("00000000000000000000000000000001") - blob = b"" - sequester_blob = b"" - - # Create and update vlob without sequester - await vlob_create( - alice_ws, realm_id=realm, vlob_id=vlob_id, blob=blob, sequester_blob={}, check_rep=True - ) - await vlob_update( - alice_ws, version=2, vlob_id=vlob_id, sequester_blob={}, blob=blob, check_rep=True - ) - - # Create sequester service - s1 = sequester_service_factory( - authority=coolorg.sequester_authority, label="Sequester service 1" - ) - - await backend.sequester.create_service( - organization_id=coolorg.organization_id, service=s1.backend_service - ) - - # Create updates - await vlob_update( - alice_ws, - version=3, - vlob_id=vlob_id, - blob=blob, - sequester_blob={s1.service_id: sequester_blob}, - check_rep=True, - ) - await vlob_update( - alice_ws, - version=4, - vlob_id=vlob_id, - blob=blob, - sequester_blob={s1.service_id: sequester_blob}, - check_rep=True, - ) - # Dump realm - dump = await backend.sequester.dump_realm( - organization_id=coolorg.organization_id, service_id=s1.service_id, realm_id=realm - ) - - assert dump == [(vlob_id, 3, sequester_blob), (vlob_id, 4, sequester_blob)] - - # Create another vlob - another_vlob_id = VlobID.from_hex("00000000000000000000000000000002") - another_sequester_blob = b"" - - await vlob_create( - alice_ws, - realm_id=realm, - vlob_id=another_vlob_id, - blob=blob, - sequester_blob={s1.service_id: 
another_sequester_blob}, - check_rep=True, - ) - await vlob_update( - alice_ws, - version=2, - vlob_id=another_vlob_id, - sequester_blob={s1.service_id: another_sequester_blob}, - blob=blob, - check_rep=True, - ) - - dump = await backend.sequester.dump_realm( - organization_id=coolorg.organization_id, service_id=s1.service_id, realm_id=realm - ) - - assert dump == [ - (vlob_id, 3, sequester_blob), - (vlob_id, 4, sequester_blob), - (another_vlob_id, 1, another_sequester_blob), - (another_vlob_id, 2, another_sequester_blob), - ] diff --git a/server/tests/backend/test_access.py b/server/tests/backend/test_access.py deleted file mode 100644 index 30b6b75bb07..00000000000 --- a/server/tests/backend/test_access.py +++ /dev/null @@ -1,53 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -import pytest - -from parsec._parsec import InvitationType -from parsec.api.protocol import AUTHENTICATED_CMDS, INVITED_CMDS, packb, unpackb - - -async def check_forbidden_cmds(ws, cmds): - for cmd in cmds: - if cmd == "events_listen": - # Must pass wait option otherwise backend will hang forever - await ws.send(packb({"cmd": cmd, "wait": False})) - else: - await ws.send(packb({"cmd": cmd})) - rep = await ws.receive() - assert unpackb(rep) == {"status": "invalid_msg_format", "reason": "Invalid message format"} - - -async def check_allowed_cmds(ws, cmds): - for cmd in cmds: - if cmd == "events_listen": - # Must pass wait option otherwise backend will hang forever - await ws.send(packb({"cmd": cmd, "wait": False})) - else: - await ws.send(packb({"cmd": cmd})) - rep = await ws.receive() - assert unpackb(rep)["status"] != "unknown_command" - - -@pytest.mark.trio -async def test_invited_has_limited_access( - backend, backend_asgi_app, backend_invited_ws_factory, alice -): - invitation = await backend.invite.new_for_device( - organization_id=alice.organization_id, greeter_user_id=alice.user_id - ) - - async with backend_invited_ws_factory( - backend_asgi_app, - organization_id=alice.organization_id, - invitation_type=InvitationType.DEVICE, - token=invitation.token, - ) as ws: - await check_forbidden_cmds(ws, AUTHENTICATED_CMDS - INVITED_CMDS) - await check_allowed_cmds(ws, INVITED_CMDS) - - -@pytest.mark.trio -async def test_authenticated_has_limited_access(alice_ws): - await check_forbidden_cmds(alice_ws, INVITED_CMDS - AUTHENTICATED_CMDS) - await check_allowed_cmds(alice_ws, AUTHENTICATED_CMDS) diff --git a/server/tests/backend/test_administration_rest_api.py b/server/tests/backend/test_administration_rest_api.py deleted file mode 100644 index ea039cf6995..00000000000 --- a/server/tests/backend/test_administration_rest_api.py +++ /dev/null @@ -1,750 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -from unittest.mock import ANY - -import pytest -import trio - -from parsec._parsec import ActiveUsersLimit, BackendEventOrganizationExpired, DateTime -from parsec.api.protocol import ( - BlockID, - HandshakeOrganizationExpired, - OrganizationID, - UserProfile, - VlobID, -) -from parsec.api.rest import organization_stats_rep_serializer -from parsec.backend.organization import Organization -from tests.common import customize_fixtures, local_device_to_backend_user - - -@pytest.mark.trio -@pytest.mark.parametrize("bad_auth_reason", ["no_header", "bad_header"]) -async def test_administration_api_auth(backend_asgi_app, coolorg, bad_auth_reason): - if bad_auth_reason 
== "bad_header": - headers = {"authorization": "Bearer dummy"} - else: - assert bad_auth_reason == "no_header" - headers = {} - - client = backend_asgi_app.test_client() - - response = await client.get( - f"/administration/organizations/{coolorg.organization_id.str}", headers=headers - ) - assert response.status_code == 403 - assert await response.get_json() == {"error": "not_allowed"} - - response = await client.patch( - f"/administration/organizations/{coolorg.organization_id.str}", json={}, headers=headers - ) - assert response.status_code == 403 - assert await response.get_json() == {"error": "not_allowed"} - - response = await client.post(f"/administration/organizations", json={}, headers=headers) - assert response.status_code == 403 - assert await response.get_json() == {"error": "not_allowed"} - - -### organization_create ### - - -@pytest.mark.trio -async def test_organization_create(backend_asgi_app): - organization_id = OrganizationID("NewOrg") - client = backend_asgi_app.test_client() - - response = await client.post( - f"/administration/organizations", - headers={"Authorization": f"Bearer {backend_asgi_app.backend.config.administration_token}"}, - json={"organization_id": organization_id.str}, - ) - - response_content = await response.get_json() - assert response.status_code == 200 - assert response_content == {"bootstrap_token": ANY} - - org = await backend_asgi_app.backend.organization.get(organization_id) - assert org == Organization( - organization_id=organization_id, - bootstrap_token=response_content["bootstrap_token"], - is_expired=False, - created_on=ANY, - bootstrapped_on=None, - root_verify_key=None, - user_profile_outsider_allowed=True, - active_users_limit=ActiveUsersLimit.NO_LIMIT, - sequester_authority=None, - sequester_services_certificates=None, - ) - - -@pytest.mark.trio -async def test_organization_create_bad_data(backend_asgi_app): - organization_id = OrganizationID("NewOrg") - for bad_body in [ - # Bad OrganizationID - {"organization_id": ""}, # Empty - {"organization_id": "x" * 33}, # Too long - {"organization_id": "My!Org"}, # Forbidden characters - {"organization_id": "C%C3%A9TAC%C3%A9"}, # Unexpected url escape (so forbidden characters) - # Missing required field - {"active_users_limit": 10}, - # Bad field value - {"organization_id": organization_id.str, "active_users_limit": -1}, - {"organization_id": organization_id.str, "active_users_limit": "foo"}, - {"organization_id": organization_id.str, "user_profile_outsider_allowed": 42}, - {"organization_id": organization_id.str, "user_profile_outsider_allowed": "foo"}, - ]: - client = backend_asgi_app.test_client() - response = await client.post( - "/administration/organizations", - json=bad_body, - headers={ - "Authorization": f"Bearer {backend_asgi_app.backend.config.administration_token}" - }, - ) - assert response.status_code == 400 - assert await response.get_json() == {"error": "bad_data", "reason": ANY} - - -@pytest.mark.trio -@pytest.mark.parametrize("expired", (False, True)) -async def test_organization_create_already_exists_not_bootstrapped(backend_asgi_app, expired): - organization_id = OrganizationID("NewOrg") - original_bootstrap_token = "123" - await backend_asgi_app.backend.organization.create( - id=organization_id, bootstrap_token=original_bootstrap_token - ) - if expired: - await backend_asgi_app.backend.organization.update(id=organization_id, is_expired=True) - - client = backend_asgi_app.test_client() - - response = await client.post( - f"/administration/organizations", - 
json={"organization_id": organization_id.str}, - headers={"Authorization": f"Bearer {backend_asgi_app.backend.config.administration_token}"}, - ) - assert response.status_code == 200 - response_content = await response.get_json() - assert response_content == {"bootstrap_token": ANY} - - # Token should be regenerated each time, and the configuration should be overwritten - assert response_content["bootstrap_token"] != original_bootstrap_token - - org = await backend_asgi_app.backend.organization.get(id=organization_id) - assert org == Organization( - organization_id=organization_id, - bootstrap_token=response_content["bootstrap_token"], - is_expired=False, - created_on=ANY, - bootstrapped_on=None, - root_verify_key=None, - user_profile_outsider_allowed=True, - active_users_limit=ActiveUsersLimit.NO_LIMIT, - sequester_authority=None, - sequester_services_certificates=None, - ) - - -@pytest.mark.trio -async def test_organization_create_already_exists_and_bootstrapped(backend_asgi_app, coolorg): - client = backend_asgi_app.test_client() - - response = await client.post( - f"/administration/organizations", - json={"organization_id": coolorg.organization_id.str}, - headers={"Authorization": f"Bearer {backend_asgi_app.backend.config.administration_token}"}, - ) - assert response.status_code == 400 - assert await response.get_json() == {"error": "already_exists"} - - -@pytest.mark.trio -async def test_organization_create_with_custom_initial_config(backend_asgi_app): - organization_id = OrganizationID("NewOrg") - original_bootstrap_token = "123" - - await backend_asgi_app.backend.organization.create( - id=organization_id, bootstrap_token=original_bootstrap_token - ) - client = backend_asgi_app.test_client() - response = await client.post( - f"/administration/organizations", - json={ - "organization_id": organization_id.str, - "user_profile_outsider_allowed": False, - "active_users_limit": None, - }, - headers={"Authorization": f"Bearer {backend_asgi_app.backend.config.administration_token}"}, - ) - assert response.status_code == 200 - response_content = await response.get_json() - assert response_content == {"bootstrap_token": ANY} - - org = await backend_asgi_app.backend.organization.get(organization_id) - assert org == Organization( - organization_id=organization_id, - bootstrap_token=response_content["bootstrap_token"], - is_expired=False, - created_on=ANY, - bootstrapped_on=None, - root_verify_key=None, - user_profile_outsider_allowed=False, - active_users_limit=ActiveUsersLimit.NO_LIMIT, - sequester_authority=None, - sequester_services_certificates=None, - ) - - # New custom initial config should be taken into account each time the org is recreated - response = await client.post( - f"/administration/organizations", - json={ - "organization_id": organization_id.str, - "user_profile_outsider_allowed": True, - "active_users_limit": 10, - }, - headers={"Authorization": f"Bearer {backend_asgi_app.backend.config.administration_token}"}, - ) - response_content = await response.get_json() - assert response.status_code == 200 - assert response_content == {"bootstrap_token": ANY} - - org = await backend_asgi_app.backend.organization.get(organization_id) - assert org == Organization( - organization_id=organization_id, - bootstrap_token=response_content["bootstrap_token"], - is_expired=False, - created_on=ANY, - bootstrapped_on=None, - root_verify_key=None, - user_profile_outsider_allowed=True, - active_users_limit=ActiveUsersLimit.LimitedTo(10), - sequester_authority=None, - 
sequester_services_certificates=None, - ) - - # Default initial config should also be used if org is recreated without custom config - response = await client.post( - f"/administration/organizations", - json={"organization_id": organization_id.str}, - headers={"Authorization": f"Bearer {backend_asgi_app.backend.config.administration_token}"}, - ) - assert response.status_code == 200 - response_content = await response.get_json() - assert response_content == {"bootstrap_token": ANY} - - org = await backend_asgi_app.backend.organization.get(organization_id) - assert org == Organization( - organization_id=organization_id, - bootstrap_token=response_content["bootstrap_token"], - is_expired=False, - created_on=ANY, - bootstrapped_on=None, - root_verify_key=None, - user_profile_outsider_allowed=True, - active_users_limit=ActiveUsersLimit.NO_LIMIT, - sequester_authority=None, - sequester_services_certificates=None, - ) - - -### organization_config ### - - -@pytest.mark.trio -@pytest.mark.parametrize("type", ("unknown", "invalid")) -async def test_organization_config_not_found(backend_asgi_app, type): - org = "dummy" if type == "unknown" else "x" * 33 - client = backend_asgi_app.test_client() - response = await client.get( - f"/administration/organizations/{org}", - headers={"Authorization": f"Bearer {backend_asgi_app.backend.config.administration_token}"}, - ) - assert response.status_code == 404 - assert await response.get_json() == {"error": "not_found"} - - -@pytest.mark.trio -@pytest.mark.parametrize("bootstrapped", (True, False)) -async def test_organization_config_ok(backend_asgi_app, coolorg, bootstrapped): - if not bootstrapped: - organization_id = OrganizationID("NewOrg") - await backend_asgi_app.backend.organization.create( - id=organization_id, bootstrap_token="123" - ) - else: - organization_id = coolorg.organization_id - - client = backend_asgi_app.test_client() - - response = await client.get( - f"/administration/organizations/{organization_id.str}", - headers={"Authorization": f"Bearer {backend_asgi_app.backend.config.administration_token}"}, - ) - assert response.status_code == 200 - assert await response.get_json() == { - "active_users_limit": None, - "is_bootstrapped": bootstrapped, - "is_expired": False, - "user_profile_outsider_allowed": True, - } - - # Ensure config change is taken into account - await backend_asgi_app.backend.organization.update( - id=organization_id, active_users_limit=ActiveUsersLimit.LimitedTo(42), is_expired=True - ) - response = await client.get( - f"/administration/organizations/{organization_id.str}", - headers={"Authorization": f"Bearer {backend_asgi_app.backend.config.administration_token}"}, - ) - assert response.status_code == 200 - assert await response.get_json() == { - "active_users_limit": 42, - "is_bootstrapped": bootstrapped, - "is_expired": True, - "user_profile_outsider_allowed": True, - } - - -### organization_update ### - - -@pytest.mark.trio -@pytest.mark.parametrize("type", ("unknown", "invalid")) -async def test_organization_update_not_found(backend_asgi_app, type): - org = "dummy" if type == "unknown" else "x" * 33 - client = backend_asgi_app.test_client() - response = await client.patch( - f"/administration/organizations/{org}", - json={"bootstrap_token": "123"}, - headers={"Authorization": f"Bearer {backend_asgi_app.backend.config.administration_token}"}, - ) - assert response.status_code == 404 - assert await response.get_json() == {"error": "not_found"} - - # Empty update is an interesting case - response = await client.patch( - 
f"/administration/organizations/dummy", - json={}, - headers={"Authorization": f"Bearer {backend_asgi_app.backend.config.administration_token}"}, - ) - assert response.status_code == 404 - assert await response.get_json() == {"error": "not_found"} - - -@pytest.mark.trio -@pytest.mark.parametrize("bootstrapped", (True, False)) -async def test_organization_update_ok(backend_asgi_app, coolorg, bootstrapped): - if not bootstrapped: - organization_id = OrganizationID("NewOrg") - await backend_asgi_app.backend.organization.create( - id=organization_id, bootstrap_token="123" - ) - else: - organization_id = coolorg.organization_id - - client = backend_asgi_app.test_client() - - with backend_asgi_app.backend.event_bus.listen() as spy: - response = await client.patch( - f"/administration/organizations/{organization_id.str}", - json={"user_profile_outsider_allowed": False, "active_users_limit": 10}, - headers={ - "Authorization": f"Bearer {backend_asgi_app.backend.config.administration_token}" - }, - ) - assert response.status_code == 200 - assert await response.get_json() == {} - - org = await backend_asgi_app.backend.organization.get(organization_id) - assert org.user_profile_outsider_allowed is False - assert org.active_users_limit == ActiveUsersLimit.LimitedTo(10) - - # Partial update - response = await client.patch( - f"/administration/organizations/{organization_id.str}", - json={"active_users_limit": None}, - headers={ - "Authorization": f"Bearer {backend_asgi_app.backend.config.administration_token}" - }, - ) - assert response.status_code == 200 - assert await response.get_json() == {} - - org = await backend_asgi_app.backend.organization.get(organization_id) - assert org.user_profile_outsider_allowed is False - assert org.active_users_limit is ActiveUsersLimit.NO_LIMIT - - # Partial update with unknown field - response = await client.patch( - f"/administration/organizations/{organization_id.str}", - json={"dummy": "whatever"}, - headers={ - "Authorization": f"Bearer {backend_asgi_app.backend.config.administration_token}" - }, - ) - assert response.status_code == 200 - assert await response.get_json() == {} - - # Empty update - response = await client.patch( - f"/administration/organizations/{organization_id.str}", - json={}, - headers={ - "Authorization": f"Bearer {backend_asgi_app.backend.config.administration_token}" - }, - ) - assert response.status_code == 200 - assert await response.get_json() == {} - - org = await backend_asgi_app.backend.organization.get(organization_id) - assert org.user_profile_outsider_allowed is False - assert org.active_users_limit is ActiveUsersLimit.NO_LIMIT - - # No ORGANIZATION_EXPIRED event should have occurred - await trio.testing.wait_all_tasks_blocked() - assert spy.events == [] - - -@pytest.mark.trio -@customize_fixtures(backend_not_populated=True) -async def test_bootstrap_expired_organization(backend_asgi_app, backend, alice, coolorg): - bootstrap_token = "123" - await backend_asgi_app.backend.organization.create( - id=coolorg.organization_id, bootstrap_token=bootstrap_token - ) - await backend_asgi_app.backend.organization.update(id=coolorg.organization_id, is_expired=True) - - # Bootstrap should go fine - backend_user, backend_first_device = local_device_to_backend_user(alice, coolorg) - await backend.organization.bootstrap( - id=coolorg.organization_id, - user=backend_user, - first_device=backend_first_device, - bootstrap_token=bootstrap_token, - root_verify_key=coolorg.root_verify_key, - ) - - # Once bootstrapped, the organization is still expired 
- org = await backend.organization.get(id=coolorg.organization_id) - assert org.is_expired is True - - -@pytest.mark.trio -async def test_organization_update_expired_field( - backend_asgi_app, coolorg, alice, backend_authenticated_ws_factory -): - organization_id = coolorg.organization_id - - client = backend_asgi_app.test_client() - - with backend_asgi_app.backend.event_bus.listen() as spy: - response = await client.patch( - f"/administration/organizations/{organization_id.str}", - json={"is_expired": True}, - headers={ - "Authorization": f"Bearer {backend_asgi_app.backend.config.administration_token}" - }, - ) - assert response.status_code == 200 - assert await response.get_json() == {} - await spy.wait_with_timeout( - BackendEventOrganizationExpired(organization_id=organization_id) - ) - - org = await backend_asgi_app.backend.organization.get(organization_id) - assert org.is_expired is True - - # No longer allowed to use the organization - with pytest.raises(HandshakeOrganizationExpired): - async with backend_authenticated_ws_factory(backend_asgi_app, alice): - pass - - # Re-enable the organization - response = await client.patch( - f"/administration/organizations/{organization_id.str}", - json={"is_expired": False}, - headers={"Authorization": f"Bearer {backend_asgi_app.backend.config.administration_token}"}, - ) - assert response.status_code == 200 - assert await response.get_json() == {} - - org = await backend_asgi_app.backend.organization.get(organization_id) - assert org.is_expired is False - - # Now the device can connect to the organization - - async with backend_authenticated_ws_factory(backend_asgi_app, alice): - pass - - -# `active_users_limit` is already tested in test/backend/user/test_user_create.py - - -@pytest.mark.trio -async def test_organization_update_bad_data(backend_asgi_app, coolorg): - client = backend_asgi_app.test_client() - for bad_body in [ - # Bad field value - {"active_users_limit": -1}, - {"active_users_limit": "foo"}, - {"user_profile_outsider_allowed": 42}, - {"user_profile_outsider_allowed": "foo"}, - ]: - response = await client.patch( - f"/administration/organizations/{coolorg.organization_id.str}", - json=bad_body, - headers={ - "Authorization": f"Bearer {backend_asgi_app.backend.config.administration_token}" - }, - ) - assert response.status_code == 400 - assert await response.get_json() == {"error": "bad_data", "reason": ANY} - - -### organization_stats ### - - -@pytest.mark.trio -@pytest.mark.parametrize("type", ("unknown", "invalid")) -async def test_organization_stats_not_found(backend_asgi_app, type): - org = "dummy" if type == "unknown" else "x" * 33 - client = backend_asgi_app.test_client() - response = await client.get( - f"/administration/organizations/{org}/stats", - headers={"Authorization": f"Bearer {backend_asgi_app.backend.config.administration_token}"}, - ) - assert response.status_code == 404 - assert await response.get_json() == {"error": "not_found"} - - -@pytest.mark.trio -async def test_organization_stats_data(backend_asgi_app, realm, realm_factory, alice): - client = backend_asgi_app.test_client() - - async def organization_stats(): - response = await client.get( - f"/administration/organizations/{alice.organization_id.str}/stats", - headers={ - "Authorization": f"Bearer {backend_asgi_app.backend.config.administration_token}" - }, - ) - assert response.status_code == 200 - return organization_stats_rep_serializer.load(await response.get_json()) - - rep = await organization_stats() - assert rep == { - "data_size": 0, - 
"metadata_size": ANY, - "users": 3, - "active_users": 3, - "users_per_profile_detail": [ - {"profile": UserProfile.ADMIN, "active": 2, "revoked": 0}, - {"profile": UserProfile.STANDARD, "active": 1, "revoked": 0}, - {"profile": UserProfile.OUTSIDER, "active": 0, "revoked": 0}, - ], - "realms": 4, - } - initial_metadata_size = rep["metadata_size"] - - # Create new metadata - await backend_asgi_app.backend.vlob.create( - organization_id=alice.organization_id, - author=alice.device_id, - realm_id=realm, - encryption_revision=1, - vlob_id=VlobID.new(), - timestamp=DateTime.now(), - blob=b"1234", - ) - rep = await organization_stats() - assert rep == { - "data_size": 0, - "metadata_size": initial_metadata_size + 4, - "users": 3, - "active_users": 3, - "users_per_profile_detail": [ - {"profile": UserProfile.ADMIN, "active": 2, "revoked": 0}, - {"profile": UserProfile.STANDARD, "active": 1, "revoked": 0}, - {"profile": UserProfile.OUTSIDER, "active": 0, "revoked": 0}, - ], - "realms": 4, - } - - # Create new data - await backend_asgi_app.backend.block.create( - organization_id=alice.organization_id, - author=alice.device_id, - block_id=BlockID.new(), - realm_id=realm, - block=b"1234", - ) - rep = await organization_stats() - assert rep == { - "data_size": 4, - "metadata_size": initial_metadata_size + 4, - "users": 3, - "active_users": 3, - "users_per_profile_detail": [ - {"profile": UserProfile.ADMIN, "active": 2, "revoked": 0}, - {"profile": UserProfile.STANDARD, "active": 1, "revoked": 0}, - {"profile": UserProfile.OUTSIDER, "active": 0, "revoked": 0}, - ], - "realms": 4, - } - - # create new workspace - await realm_factory(backend_asgi_app.backend, alice) - rep = await organization_stats() - assert rep == { - "data_size": 4, - "metadata_size": initial_metadata_size + 4, - "users": 3, - "active_users": 3, - "users_per_profile_detail": [ - {"profile": UserProfile.ADMIN, "active": 2, "revoked": 0}, - {"profile": UserProfile.STANDARD, "active": 1, "revoked": 0}, - {"profile": UserProfile.OUTSIDER, "active": 0, "revoked": 0}, - ], - "realms": 5, - } - - -@pytest.mark.trio -@customize_fixtures(backend_not_populated=True) -async def test_organization_stats_users( - backend_asgi_app, - backend_data_binder_factory, - organization_factory, - local_device_factory, - other_org, -): - client = backend_asgi_app.test_client() - - async def organization_stats(organization_id): - response = await client.get( - f"/administration/organizations/{organization_id.str}/stats", - headers={ - "Authorization": f"Bearer {backend_asgi_app.backend.config.administration_token}" - }, - ) - assert response.status_code == 200 - return organization_stats_rep_serializer.load(await response.get_json()) - - binder = backend_data_binder_factory(backend_asgi_app.backend) - org = organization_factory("IFD") - godfrey1 = local_device_factory( - org=org, - base_device_id="godfrey@d1", - base_human_handle="Godfrey Ho ", - profile=UserProfile.ADMIN, - ) - await binder.bind_organization(org, godfrey1, initial_user_manifest="not_synced") - - expected_stats = { - "users": 1, - "active_users": 1, - "users_per_profile_detail": [ - {"profile": UserProfile.ADMIN, "active": 1, "revoked": 0}, - {"profile": UserProfile.STANDARD, "active": 0, "revoked": 0}, - {"profile": UserProfile.OUTSIDER, "active": 0, "revoked": 0}, - ], - "data_size": 0, - "metadata_size": 0, - "realms": 0, - } - - for profile in UserProfile.VALUES: - i = [ - i - for i, v in enumerate(expected_stats["users_per_profile_detail"]) - if v["profile"] == profile - ][0] - device = 
local_device_factory(profile=profile, org=org) - await binder.bind_device(device, certifier=godfrey1, initial_user_manifest="not_synced") - expected_stats["users"] += 1 - expected_stats["active_users"] += 1 - expected_stats["users_per_profile_detail"][i]["active"] += 1 - stats = await organization_stats(org.organization_id) - assert stats == expected_stats - - await binder.bind_revocation(device.user_id, certifier=godfrey1) - expected_stats["active_users"] -= 1 - expected_stats["users_per_profile_detail"][i]["active"] -= 1 - expected_stats["users_per_profile_detail"][i]["revoked"] += 1 - stats = await organization_stats(org.organization_id) - assert stats == expected_stats - - # Also make sure stats are isolated between organizations - other_org_device = local_device_factory(org=other_org, profile=UserProfile.ADMIN) - await binder.bind_organization(other_org, other_org_device, initial_user_manifest="not_synced") - stats = await organization_stats(other_org_device.organization_id) - assert stats == { - "users": 1, - "active_users": 1, - "users_per_profile_detail": [ - {"profile": UserProfile.ADMIN, "active": 1, "revoked": 0}, - {"profile": UserProfile.STANDARD, "active": 0, "revoked": 0}, - {"profile": UserProfile.OUTSIDER, "active": 0, "revoked": 0}, - ], - "data_size": 0, - "metadata_size": 0, - "realms": 0, - } - - -@pytest.mark.trio -async def test_handles_escaped_path(backend_asgi_app): - organization_id = "CéTACé" - escaped_organization_id = "C%C3%A9TAC%C3%A9" - bad_escaped_organization_id = "C%C3%A9TAC%+C3%A9" - - ROUTES_PATTERN = ( - "/administration/organizations/{organization_id}", - "/administration/organizations/{organization_id}/stats", - ) - - client = backend_asgi_app.test_client() - - # Not found - for route_pattern in ROUTES_PATTERN: - route = route_pattern.format(organization_id=escaped_organization_id) - response = await client.get( - route, - headers={ - "Authorization": f"Bearer {backend_asgi_app.backend.config.administration_token}" - }, - ) - assert response.status_code == 404, route - assert await response.get_json() == {"error": "not_found"}, route - - # Now create the org - response = await client.post( - f"/administration/organizations", - json={"organization_id": organization_id}, - headers={"Authorization": f"Bearer {backend_asgi_app.backend.config.administration_token}"}, - ) - assert response.status_code == 200 - - # Found ! 
- for route_pattern in ROUTES_PATTERN: - route = route_pattern.format(organization_id=escaped_organization_id) - response = await client.get( - route, - headers={ - "Authorization": f"Bearer {backend_asgi_app.backend.config.administration_token}" - }, - ) - assert response.status_code == 200, route - - route = route_pattern.format(organization_id=bad_escaped_organization_id) - response = await client.get( - route, - headers={ - "Authorization": f"Bearer {backend_asgi_app.backend.config.administration_token}" - }, - ) - assert response.status_code == 404, route diff --git a/server/tests/backend/test_base.py b/server/tests/backend/test_base.py deleted file mode 100644 index bd9183eac4b..00000000000 --- a/server/tests/backend/test_base.py +++ /dev/null @@ -1,64 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -import json -from pathlib import Path - -import pytest - -from parsec.api.protocol import packb, unpackb - - -@pytest.mark.trio -async def test_connection(alice_ws): - await alice_ws.send(packb({"cmd": "ping", "ping": "42"})) - rep = await alice_ws.receive() - assert unpackb(rep) == {"status": "ok", "pong": "42"} - - -@pytest.mark.trio -async def test_bad_cmd(alice_ws): - await alice_ws.send(packb({"cmd": "dummy"})) - rep = await alice_ws.receive() - assert unpackb(rep) == {"status": "invalid_msg_format", "reason": "Invalid message format"} - - -@pytest.mark.trio -@pytest.mark.parametrize( - "kind", ["string_message", "valid_msgpack_but_not_a_dict", "invalid_msgpack"] -) -async def test_bad_msg_format(alice_ws, kind): - if kind == "string_message": - await alice_ws.send("hello") # Only websocket bytes message are accepted - elif kind == "valid_msgpack_but_not_a_dict": - await alice_ws.send(b"\x00") # Encodes the number 0 as positive fix int - else: - assert kind == "invalid_msgpack" - await alice_ws.send(b"\xc1") # Never used value according to msgpack spec - rep = await alice_ws.receive() - assert unpackb(rep) == {"status": "invalid_msg_format", "reason": "Invalid message format"} - - -@pytest.mark.trio -async def test_all_api_cmds_implemented(backend): - from parsec import _parsec - - schema_dir = (Path(__file__) / "../../../../libparsec/crates/protocol/schema/").resolve() - for family_dir in schema_dir.iterdir(): - family_mod_name = family_dir.name - for cmd_file in family_dir.glob("*.json5"): - cmd_specs = json.loads( - "\n".join( - [ - x - for x in cmd_file.read_text(encoding="utf8").splitlines() - if not x.strip().startswith("//") - ] - ) - ) - for cmd_spec in cmd_specs: - family_mod = getattr(_parsec, family_mod_name) - for version in cmd_spec["major_versions"]: - version_mod = getattr(family_mod, f"v{version}") - cmd_mod = getattr(version_mod, cmd_spec["req"]["cmd"]) - assert cmd_mod.Req in backend.apis diff --git a/server/tests/backend/test_compatibility.py b/server/tests/backend/test_compatibility.py deleted file mode 100644 index 6e74b424b75..00000000000 --- a/server/tests/backend/test_compatibility.py +++ /dev/null @@ -1,150 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -# TODO -# ruff: noqa: F821 -from __future__ import annotations - -import pytest - -from parsec._parsec import ApiVersion, DateTime -from parsec.api.protocol import ( - AuthenticatedClientHandshake, - HandshakeType, - RealmCreateRepBadTimestamp, - ServerHandshake, - packb, -) -from parsec.serde import BaseSchema, fields -from parsec.utils import BALLPARK_CLIENT_EARLY_OFFSET, 
BALLPARK_CLIENT_LATE_OFFSET - - -@pytest.mark.xfail(reason="TODO: fix this !") -def test_timestamp_out_of_ballpark_rep_schema_compatibility(): - client_timestamp = DateTime.now() - backend_timestamp = DateTime.now().add(minutes=5) - - # Backend API >= 2.4 with older clients - RealmCreateRepBadTimestamp( - reason=None, - ballpark_client_early_offset=BALLPARK_CLIENT_EARLY_OFFSET, - ballpark_client_late_offset=BALLPARK_CLIENT_LATE_OFFSET, - backend_timestamp=backend_timestamp, - client_timestamp=client_timestamp, - ) - - # Backend API < 2.4 with newer clients - RealmCreateRepBadTimestamp( - reason=None, - ballpark_client_early_offset=None, - ballpark_client_late_offset=None, - backend_timestamp=None, - client_timestamp=None, - ) - - -@pytest.mark.xfail(reason="TODO: fix this !") -def test_handshake_challenge_schema_compatibility(): - # Old handshake definition - class OlderHandshakeChallengeSchema(BaseSchema): - handshake = fields.CheckedConstant("challenge", required=True) - challenge = fields.Bytes(required=True) - supported_api_versions = fields.List(ApiVersionField(), required=True) - - older_handshake_challenge_serializer = serializer_factory(OlderHandshakeChallengeSchema) - - timestamp = DateTime.now() - old_data = {"challenge": b"123", "handshake": "challenge", "supported_api_versions": []} - new_data = { - **old_data, - "ballpark_client_early_offset": 1.0, - "ballpark_client_late_offset": 1.0, - "backend_timestamp": timestamp, - } - compat_data = { - **old_data, - "ballpark_client_early_offset": None, - "ballpark_client_late_offset": None, - "backend_timestamp": None, - } - - # Backend API >= 2.4 with older clients - data = handshake_challenge_serializer.dumps(new_data) - assert older_handshake_challenge_serializer.loads(data) == old_data - - # Backend API < 2.4 with newer clients - data = older_handshake_challenge_serializer.dumps(old_data) - assert handshake_challenge_serializer.loads(data) == {**compat_data, "client_timestamp": None} - - -@pytest.mark.xfail(reason="TODO: fix this !") -def test_handshake_challenge_schema_for_client_server_api_compatibility( - mallory, alice, monkeypatch -): - ash = ServerHandshake() - - challenge = b"1234567890" - - # Test server handshake: client API <= 2.4 server > 3.0 - client_version = ApiVersion(2, 4) - - answer = { - "handshake": "answer", - "type": HandshakeType.AUTHENTICATED.value, - "client_api_version": client_version, - "organization_id": alice.organization_id.str, - "device_id": alice.device_id.str, - "rvk": alice.root_verify_key.encode(), - "answer": alice.signing_key.sign(challenge), - } - - ash.build_challenge_req() - ash.challenge = challenge - ash.process_answer_req(packb(answer)) - result_req = ash.build_result_req(alice.verify_key) - result = handshake_result_serializer.loads(result_req) - assert result["result"] == "ok" - - # Test server handshake: client API <= 2.8 server > 3.0 - - ash = ServerHandshake() - client_version = ApiVersion(2, 8) - answer = { - "handshake": "answer", - "type": HandshakeType.AUTHENTICATED.value, - "client_api_version": client_version, - "organization_id": alice.organization_id.str, - "device_id": alice.device_id.str, - "rvk": alice.root_verify_key.encode(), - "answer": alice.signing_key.sign(answer_serializer.dumps({"answer": challenge})), - } - ash.build_challenge_req() - ash.challenge = challenge - ash.process_answer_req(packb(answer)) - result_req = ash.build_result_req(alice.verify_key) - result = handshake_result_serializer.loads(result_req) - assert result["result"] == "ok" - - # Authenticated 
client handshake: client api < 3 - - bch = AuthenticatedClientHandshake( - mallory.organization_id, mallory.device_id, mallory.signing_key, mallory.root_verify_key - ) - - client_version = ApiVersion(2, 8) - - req = { - "handshake": "challenge", - "challenge": challenge, - "supported_api_versions": ServerHandshake.SUPPORTED_API_VERSIONS, - "backend_timestamp": DateTime.now(), - "ballpark_client_early_offset": BALLPARK_CLIENT_EARLY_OFFSET, - "ballpark_client_late_offset": BALLPARK_CLIENT_LATE_OFFSET, - } - - monkeypatch.setattr(bch, "SUPPORTED_API_VERSIONS", [client_version]) - - answer_req = bch.process_challenge_req(packb(req)) - - answer = handshake_answer_serializer.loads(answer_req) - assert mallory.verify_key.verify(answer["answer"]) == answer_serializer.dumps( - {"answer": challenge} - ) diff --git a/server/tests/backend/test_config.py b/server/tests/backend/test_config.py deleted file mode 100644 index f46820815be..00000000000 --- a/server/tests/backend/test_config.py +++ /dev/null @@ -1,25 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -import pytest - -from parsec.backend.cli.utils import _split_with_escaping - - -@pytest.mark.parametrize( - "txt,expected_parts", - [ - ("foo", ["foo"]), - ("foo:bar", ["foo", "bar"]), - ("foo\\:bar", ["foo:bar"]), - ("::", ["", "", ""]), - ("\\:\\:", ["::"]), - (":foo:", ["", "foo", ""]), - ("\\::\\:", [":", ":"]), - ("\\\\:\\\\", ["\\", "\\"]), - ("\\n\\", ["\\n\\"]), - ], -) -def test_split_with_escaping(txt, expected_parts): - parts = _split_with_escaping(txt) - assert parts == expected_parts diff --git a/server/tests/backend/test_events.py b/server/tests/backend/test_events.py deleted file mode 100644 index c68d29daa33..00000000000 --- a/server/tests/backend/test_events.py +++ /dev/null @@ -1,221 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -import pytest -import trio - -from parsec._parsec import BackendEventPinged -from parsec.api.protocol import ( - APIEventPinged, - EventsListenRepOk, -) -from parsec.backend.asgi import app_factory -from tests.backend.common import ( - authenticated_ping, - real_clock_timeout, -) -from tests.common import AuthenticatedRpcApiClient - -# TODO: also test connection is cancelled when the organization gets expired - - -@pytest.mark.trio -async def test_sse_events_connection_closed_on_user_revoke( - backend_asgi_app, bob_rpc: AuthenticatedRpcApiClient, bob, alice -): - async def _do_revoke(): - await backend_asgi_app.backend.user.revoke_user( - organization_id=bob.organization_id, - user_id=bob.user_id, - revoked_user_certificate=b"whatever", - revoked_user_certifier=alice.device_id, - ) - # connection cancellation is handled through events, so wait - # for things to settle down to make sure there is no pending event - await trio.testing.wait_all_tasks_blocked() - - async with bob_rpc.connect_sse_events() as sse_con: - assert sse_con.status_code == 200 - - await _do_revoke() - async with real_clock_timeout(): - while True: - try: - await sse_con.connection.receive() - except trio.EndOfChannel: - # Connection is finally closed - break - - -@pytest.mark.trio -async def test_sse_events_subscribe( - backend, alice_rpc: AuthenticatedRpcApiClient, alice2_rpc: AuthenticatedRpcApiClient -): - async with real_clock_timeout(): - async with alice_rpc.connect_sse_events() as sse_con: - assert sse_con.status_code == 200 - - # Should ignore our own events - 
with backend.event_bus.listen() as spy: - await authenticated_ping(alice_rpc, "event1 (ignored)") - await authenticated_ping(alice2_rpc, "event2") - - # No guarantees those events occur before the commands' return - await spy.wait_multiple_with_timeout([BackendEventPinged, BackendEventPinged]) - - last_event_id, event = await sse_con.get_next_event_and_id() - assert event == EventsListenRepOk(APIEventPinged("event2")) - - # Also test last-event-id feature: we miss those events... - await authenticated_ping(alice2_rpc, "event3") - await authenticated_ping(alice_rpc, "event4 (ignored)") - await authenticated_ping(alice2_rpc, "event5") - async with real_clock_timeout(): - async with alice_rpc.connect_sse_events(last_event_id=last_event_id) as sse_con: - async with alice_rpc.connect_sse_events( - last_event_id="" - ) as sse_con_bad_last_event_id: - assert sse_con.status_code == 200 - assert sse_con_bad_last_event_id.status_code == 200 - - event = await sse_con.get_next_event() - assert event == EventsListenRepOk(APIEventPinged("event3")) - - await authenticated_ping(alice2_rpc, "event6") - - event = await sse_con.get_next_event() - assert event == EventsListenRepOk(APIEventPinged("event5")) - - event = await sse_con.get_next_event() - assert event == EventsListenRepOk(APIEventPinged("event6")) - - # If an unknown event is provided, we get notified about it... - with pytest.raises(RuntimeError) as exc: - await sse_con_bad_last_event_id.get_next_event() - assert str(exc.value) == "missed events !" - - # ...and then can read the events since we arrived normally - event = await sse_con_bad_last_event_id.get_next_event() - assert event == EventsListenRepOk(APIEventPinged("event6")) - - -@pytest.mark.trio -async def test_sse_events_bad_auth(alice_rpc: AuthenticatedRpcApiClient): - async with real_clock_timeout(): - - def _before_send_hook(args): - args["headers"]["Signature"] = "AAAA" - - async with alice_rpc.connect_sse_events(before_send_hook=_before_send_hook) as sse_con: - response = await sse_con.connection.as_response() - assert response.status_code == 401 - - -@pytest.mark.trio -async def test_sse_events_bad_accept_type(alice_rpc: AuthenticatedRpcApiClient): - async with real_clock_timeout(): - - def _before_send_hook(args): - args["headers"]["Accept"] = "application/json" - - async with alice_rpc.connect_sse_events(before_send_hook=_before_send_hook) as sse_con: - response = await sse_con.connection.as_response() - assert response.status_code == 406 - - -@pytest.mark.trio -@pytest.mark.postgresql -async def test_sse_cross_backend_event(backend_factory, alice, bob): - async with backend_factory() as backend_1, backend_factory(populated=False) as backend_2: - app_1 = app_factory(backend_1) - app_2 = app_factory(backend_2) - - bob_rpc = AuthenticatedRpcApiClient(app_1.test_client(), bob) - alice_rpc = AuthenticatedRpcApiClient(app_2.test_client(), alice) - - async with real_clock_timeout(): - async with alice_rpc.connect_sse_events() as sse_con: - await authenticated_ping(bob_rpc, ping="1") - - event = await sse_con.get_next_event() - assert event == EventsListenRepOk(APIEventPinged("1")) - assert sse_con.status_code == 200 - - await authenticated_ping(bob_rpc, ping="2") - await authenticated_ping(bob_rpc, ping="3") - - event = await sse_con.get_next_event() - assert event == EventsListenRepOk(APIEventPinged("2")) - event = await sse_con.get_next_event() - assert event == EventsListenRepOk(APIEventPinged("3")) - - -@pytest.mark.trio -async def test_sse_events_close_connection_on_back_pressure( 
-    monkeypatch, backend, alice_rpc: AuthenticatedRpcApiClient, alice, bob
-):
-    # The channel has a queue of size 1, meaning it will be filled after a single command
-    monkeypatch.setattr("parsec.backend.client_context.AUTHENTICATED_CLIENT_CHANNEL_SIZE", 1)
-    # The `alice_rpc` fixture lazily initiates its connection with the server, hence the
-    # monkeypatched queue size will be taken into account when creating the client context
-
-    async with real_clock_timeout():
-        async with alice_rpc.connect_sse_events() as sse_con:
-            # In SSE, our code pops the events from the client context without waiting for
-            # peer acknowledgement. Hence the TCP layer must saturate first before
-            # the client context actually starts piling up events in its queue.
-            # So here we use the event bus directly to send events synchronously,
-            # hence having the events pile up without a chance for the coroutine running
-            # the SSE handler to pop them.
-            # But there is a trick on top of that! Trio's event queue first looks for
-            # tasks to wake up before actually queuing the event. So under certain
-            # concurrency conditions 2 events are not enough (the 1st event triggers the
-            # wakeup of the SSE task, the 2nd event gets queued), hence we dispatch 3 events here!
-            backend.event_bus.send(
-                BackendEventPinged,
-                event_id="1",
-                payload=BackendEventPinged(
-                    organization_id=alice.organization_id,
-                    author=bob.device_id,
-                    ping="1",
-                ),
-            )
-            backend.event_bus.send(
-                BackendEventPinged,
-                event_id="2",
-                payload=BackendEventPinged(
-                    organization_id=alice.organization_id,
-                    author=bob.device_id,
-                    ping="2",
-                ),
-            )
-            backend.event_bus.send(
-                BackendEventPinged,
-                event_id="3",
-                payload=BackendEventPinged(
-                    organization_id=alice.organization_id,
-                    author=bob.device_id,
-                    ping="3",
-                ),
-            )
-
-            # The connection simply gets closed without an error status, given nothing
-            # wrong occurred in practice
-            response = await sse_con.connection.as_response()
-            # Status code is 200 given it was provided with the very first event (and at
-            # that time the server didn't know the client would become non-responsive!)
-            assert response.status_code == 200
-            # Note we don't check the response's body: it is possible we receive some
-            # events before the connection is actually closed, typically when the
-            # SSE code was waiting on the event memory channel it receives one event,
-            # then gets the Cancelled exception on the next await.
-
-
-@pytest.mark.trio
-async def test_sse_events_keepalive(frozen_clock, alice_rpc: AuthenticatedRpcApiClient):
    async with real_clock_timeout():
-        async with alice_rpc.connect_sse_events() as sse_con:
-            for _ in range(3):
-                await frozen_clock.sleep_with_autojump(31)
-                raw = await sse_con.connection.receive()
-                assert raw == b":keepalive\n\n"
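-
-
-# Side note: the back-pressure behavior tested above ultimately relies on trio's
-# bounded memory channels, where `send_nowait` raises `trio.WouldBlock` once the
-# buffer is full. A minimal self-contained sketch (illustrative only, not part
-# of the Parsec code base):
-#
-#     import trio
-#
-#     async def demo_back_pressure():
-#         send, recv = trio.open_memory_channel(1)  # buffer of size 1
-#         send.send_nowait("event1")  # fills the buffer
-#         try:
-#             send.send_nowait("event2")
-#         except trio.WouldBlock:
-#             print("consumer too slow: time to close its connection")
-#
-#     trio.run(demo_back_pressure)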
diff --git a/server/tests/backend/test_http.py b/server/tests/backend/test_http.py
deleted file mode 100644
index 0edcae5ba6a..00000000000
--- a/server/tests/backend/test_http.py
+++ /dev/null
@@ -1,239 +0,0 @@
-# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS
-from __future__ import annotations
-
-from urllib.request import HTTPError, Request, urlopen
-
-import pytest
-import trio
-
-from parsec import __version__ as parsec_version
-from parsec._parsec import BackendInvitationAddr, InvitationType
-from parsec.api.protocol import InvitationToken, OrganizationID
-from parsec.backend.asgi import MAX_CONTENT_LENGTH, serve_backend_with_asgi
-from tests.common import customize_fixtures
-
-
-async def _do_test_redirect(client):
-    # No redirection header, shouldn't redirect
-    rep = await client.get("/test")
-    assert rep.status == "404 NOT FOUND"
-
-    # Misspelled redirection header with a correct protocol value, shouldn't redirect
-    rep = await client.get("/test", headers={"X-Forwa-P": "https"})  # cspell: ignore Forwa
-    assert rep.status == "404 NOT FOUND"
-
-    # Correct redirection header but wrong protocol value, should redirect
-    rep = await client.get("/test", headers={"X-Forwarded-Proto": "42"})
-    # Only non-ssl requests should lead to redirection
-    assert rep.status == "301 MOVED PERMANENTLY"
-
-    # Make sure the header key is case insensitive...
-    rep = await client.get("/", headers={"x-forwarded-proto": "https"})
-    assert rep.status == "200 OK"
-
-    # ...but the header value is not!
-    rep = await client.get("/test", headers={"x-forwarded-proto": "HTTPS"})
-    # Only non-ssl requests should lead to redirection
-    assert rep.status == "301 MOVED PERMANENTLY"
-
-    # Correct header and protocol value, no redirection.
-    rep = await client.get("/test", headers={"X-Forwarded-Proto": "https"})
-    assert rep.status == "404 NOT FOUND"
-
-    # Correct header and protocol value, no redirection.
-    # The root path actually returns the index page of parsec, hence status 200 for this one.
-    rep = await client.get("/", headers={"X-Forwarded-Proto": "https"})
-    assert rep.status == "200 OK"
-
-
-@customize_fixtures(backend_forward_proto_enforce_https=("x-forwarded-proto", "https"))
-@pytest.mark.trio
-async def test_redirect_proxy(backend_asgi_app):
-    client = backend_asgi_app.test_client()
-    await _do_test_redirect(client)
-
-
-@customize_fixtures(backend_forward_proto_enforce_https=("x-forwarded-proto", "https"))
-@customize_fixtures(backend_over_ssl=True)
-@pytest.mark.trio
-async def test_forward_proto_enforce_https(backend_asgi_app):
-    client = backend_asgi_app.test_client()
-    await _do_test_redirect(client)
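-
-
-# The enforcement rule pinned down by `_do_test_redirect` boils down to a simple
-# predicate (hypothetical sketch, not the actual implementation):
-#
-#     def should_redirect_to_https(headers: dict[str, str]) -> bool:
-#         # Header *names* are case-insensitive, but the expected *value* is an
-#         # exact match: anything other than "https" triggers a 301 redirect.
-#         proto = {k.lower(): v for k, v in headers.items()}.get("x-forwarded-proto")
-#         return proto != "https"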
-
-
-@pytest.mark.trio
-@pytest.mark.parametrize("mode", ("prod", "debug"))
-async def test_server_header_in_debug(backend_factory, mode):
-    if mode == "debug":
-        config = {"debug": True}
-        expected_server_header = f"parsec/{parsec_version}"
-    else:
-        assert mode == "prod"
-        config = {"debug": False}
-        expected_server_header = "parsec"
-
-    async with backend_factory(populated=False, config=config) as backend:
-        async with trio.open_nursery() as nursery:
-            binds = await nursery.start(serve_backend_with_asgi, backend, "127.0.0.1", 0)
-            baseurl = binds[0]
-
-            for endpoint, method, expected_status in [
-                ("/", "GET", 200),
-                ("/", "HEAD", 200),
-                ("/dummy", "GET", 404),
-                ("/", "POST", 405),
-            ]:
-                req = Request(url=f"{baseurl}{endpoint}", method=method)
-                try:
-                    rep = await trio.to_thread.run_sync(urlopen, req)
-                except HTTPError as exc:
-                    rep = exc
-                assert rep.status == expected_status
-                assert rep.headers["server"] == expected_server_header
-
-            nursery.cancel_scope.cancel()
-
-
-@pytest.mark.trio
-async def test_get_404(backend_asgi_app):
-    client = backend_asgi_app.test_client()
-    rep = await client.get("/dummy")
-    assert rep.status == "404 NOT FOUND"
-    assert rep.headers["content-type"] == "text/html; charset=utf-8"
-    assert await rep.get_data()
-
-
-@pytest.mark.trio
-async def test_unexpected_exception_get_500(backend_asgi_app, monkeypatch, caplog):
-    class MyUnexpectedException(Exception):
-        pass
-
-    async def _patched_render_template(*args, **kwargs):
-        raise MyUnexpectedException("Unexpected error !")
-
-    monkeypatch.setattr("parsec.backend.asgi.render_template", _patched_render_template)
-    client = backend_asgi_app.test_client()
-    rep = await client.get("/dummy")
-    assert rep.status == "500 INTERNAL SERVER ERROR"
-
-    # The ASGI app also reports the crash in the log
-    caplog.assert_occurred_once("Exception on request GET /dummy")
-    caplog.clear()
-
-
-@pytest.mark.trio
-async def test_get_root(backend_asgi_app):
-    client = backend_asgi_app.test_client()
-    rep = await client.get("/")
-    assert rep.status == "200 OK"
-    assert rep.headers["content-type"] == "text/html; charset=utf-8"
-    assert await rep.get_data()
-
-
-@pytest.mark.trio
-async def test_get_static(backend_asgi_app):
-    client = backend_asgi_app.test_client()
-
-    # Get resource
-    rep = await client.get("/static/favicon.ico")
-    assert rep.status == "200 OK"
-    # Oddly enough, Windows considers .ico to be `image/x-icon` while IANA says `image/vnd.microsoft.icon`
-    assert rep.headers["content-type"] in ("image/vnd.microsoft.icon", "image/x-icon")
-    assert await rep.get_data()
-
-    # Also test a resource in a subfolder
-    rep = await client.get("/static/base.css")
-    assert rep.status == "200 OK"
-    assert rep.headers["content-type"] == "text/css; charset=utf-8"
-    assert await rep.get_data()
-
-    # Finally test a non-existing resource
-    rep = await client.get("/static/dummy")
-    assert rep.status == "404 NOT FOUND"
-
-    # Prevent escaping the static directory
-    rep = await client.get("/static/../__init__.py")
-    assert rep.status == "404 NOT FOUND"
-
-
-@pytest.mark.trio
-async def test_get_redirect(backend_asgi_app, backend_addr):
-    client = backend_asgi_app.test_client()
-
-    rep = await client.get("/redirect/foo/bar?a=1&b=2")
-    assert rep.status == "302 FOUND"
-    assert rep.headers["location"] == f"parsec://{backend_addr.netloc}/foo/bar?a=1&b=2&no_ssl=true"
-
-
-@pytest.mark.trio
-@customize_fixtures(backend_over_ssl=True)
-async def test_get_redirect_over_ssl(backend_asgi_app, backend_addr):
-    client = backend_asgi_app.test_client()
-
-    rep = await client.get("/redirect/foo/bar?a=1&b=2")
-    assert rep.status == "302 FOUND"
-    assert rep.headers["location"] == f"parsec://{backend_addr.netloc}/foo/bar?a=1&b=2"
-
-
-@pytest.mark.trio
-async def test_get_redirect_no_ssl_param_overwritten(backend_asgi_app, backend_addr):
-    client = backend_asgi_app.test_client()
-
-    rep = await client.get("/redirect/spam?no_ssl=false&a=1&b=2")
-    assert rep.status == "302 FOUND"
-    assert rep.headers["location"] == f"parsec://{backend_addr.netloc}/spam?a=1&b=2&no_ssl=true"
-
-
-@pytest.mark.trio
-@customize_fixtures(backend_over_ssl=True)
-async def test_get_redirect_no_ssl_param_overwritten_with_ssl_enabled(
-    backend_asgi_app, backend_addr
-):
-    client = backend_asgi_app.test_client()
-
-    rep = await client.get("/redirect/spam?a=1&b=2&no_ssl=true")
-    assert rep.status == "302 FOUND"
-    assert rep.headers["location"] == f"parsec://{backend_addr.netloc}/spam?a=1&b=2"
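-
-
-# Taken together, the redirect tests above pin down the URL rewrite rule.
-# Schematically (illustrative sketch only, not the actual implementation):
-#
-#     def to_parsec_url(netloc: str, path: str, query: dict[str, str], use_ssl: bool) -> str:
-#         query.pop("no_ssl", None)  # any client-provided value is overwritten
-#         if not use_ssl:
-#             query["no_ssl"] = "true"  # parsec:// URLs assume SSL by default
-#         query_string = "&".join(f"{k}={v}" for k, v in query.items())
-#         return f"parsec://{netloc}/{path}?{query_string}"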
-
-
-@pytest.mark.trio
-async def test_get_redirect_invitation(backend_asgi_app, backend_addr):
-    client = backend_asgi_app.test_client()
-
-    invitation_addr = BackendInvitationAddr.build(
-        backend_addr=backend_addr,
-        organization_id=OrganizationID("Org"),
-        invitation_type=InvitationType.USER,
-        token=InvitationToken.new(),
-    )
-    # TODO: should use invitation_addr.to_redirection_url() when available!
-    *_, target = invitation_addr.to_url().split("/")
-    rep = await client.get(f"/redirect/{target}")
-    assert rep.status == "302 FOUND"
-    location_addr = BackendInvitationAddr.from_url(rep.headers["location"])
-    assert location_addr == invitation_addr
-
-
-@pytest.mark.trio
-@customize_fixtures(backend_over_ssl=True)
-async def test_get_redirect_invitation_over_ssl(backend_asgi_app, backend_addr):
-    await test_get_redirect_invitation(backend_asgi_app, backend_addr)
-
-
-@pytest.mark.trio
-async def test_content_is_too_big(backend_asgi_app):
-    client = backend_asgi_app.test_client()
-
-    max_length_content = b"x" * MAX_CONTENT_LENGTH
-    headers = {"Authorization": f"Bearer {backend_asgi_app.backend.config.administration_token}"}
-
-    response = await client.post(
-        "/administration/organizations", headers=headers, data=max_length_content + b"y"
-    )
-    assert response.status == "413 REQUEST ENTITY TOO LARGE"
-
-    # Make sure max length is ok
-    response = await client.post(
-        "/administration/organizations", headers=headers, data=max_length_content
-    )
-    assert response.status == "400 BAD REQUEST"
diff --git a/server/tests/backend/test_logger.py b/server/tests/backend/test_logger.py
deleted file mode 100644
index 569891222b9..00000000000
--- a/server/tests/backend/test_logger.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS
-from __future__ import annotations
-
-import base64
-from typing import Optional
-
-from hypercorn.config import Config as HyperConfig
-from hypercorn.typing import HTTPScope, ResponseSummary
-
-from parsec.backend.asgi.logger import ParsecLogger
-
-
-def _create_http_scope(author: Optional[bytes] = None) -> HTTPScope:
-    scope = HTTPScope()
-    scope["type"] = "http"
-    scope["method"] = "GET"
-    scope["query_string"] = b"/"
-    scope["path"] = ""
-    scope["scheme"] = ""
-    scope["headers"] = [(b"Author", author)] if author is not None else []
-
-    return scope
-
-
-def _create_empty_response() -> ResponseSummary:
-    resp = ResponseSummary()
-    resp["status"] = 200
-    return resp
-
-
-def test_base64_author():
-    logger = ParsecLogger(HyperConfig())
-
-    author_bytes = base64.b64encode(b"alice@work")
-    mapped = logger.atoms(_create_http_scope(author_bytes), _create_empty_response(), 0.0)
-
-    assert "author" in mapped
-    assert mapped["author"] == "alice@work"
-
-
-def test_bad_base64_author():
-    logger = ParsecLogger(HyperConfig())
-
-    # Invalid base64 sequence
-    author_bytes = b""
-    mapped = logger.atoms(_create_http_scope(author_bytes), _create_empty_response(), 0.0)
-
-    assert "author" not in mapped
-
-
-def test_no_author_header():
-    logger = ParsecLogger(HyperConfig())
-    mapped = logger.atoms(_create_http_scope(), _create_empty_response(), 0.0)
-
-    assert "author" not in mapped
diff --git a/server/tests/backend/test_message.py b/server/tests/backend/test_message.py
deleted file mode 100644
index ae27254e5e3..00000000000
--- a/server/tests/backend/test_message.py
+++ /dev/null
@@ -1,89 +0,0 @@
-# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS
-from __future__ import annotations
-
-import pytest
-
-from parsec._parsec import (
-    DateTime,
-    authenticated_cmds,
-)
-from parsec.backend.asgi import app_factory
-from parsec.backend.config import PostgreSQLBlockStoreConfig
-from tests.backend.common import (
-    message_get,
-)
-from tests.common import AuthenticatedRpcApiClient, real_clock_timeout
-
-Message = authenticated_cmds.latest.message_get.Message
-MessageGetRepOk = 
authenticated_cmds.latest.message_get.RepOk - - -@pytest.mark.trio -async def test_message_from_bob_to_alice(backend, alice, bob, alice_rpc): - d1 = DateTime(2000, 1, 1) - await backend.message.send( - bob.organization_id, bob.device_id, alice.user_id, d1, b"Hello from Bob !" - ) - - rep = await message_get(alice_rpc) - assert rep == MessageGetRepOk( - messages=[ - Message( - body=b"Hello from Bob !", - sender=bob.device_id, - timestamp=d1, - index=1, - certificate_index=9, - ) - ], - ) - - -@pytest.mark.trio -async def test_message_get_with_offset(backend, alice, bob, alice_rpc): - d1 = DateTime(2000, 1, 1) - d2 = DateTime(2000, 1, 2) - await backend.message.send(bob.organization_id, bob.device_id, alice.user_id, d1, b"1") - await backend.message.send(bob.organization_id, bob.device_id, alice.user_id, d1, b"2") - await backend.message.send(bob.organization_id, bob.device_id, alice.user_id, d2, b"3") - - rep = await message_get(alice_rpc, 1) - assert rep == MessageGetRepOk( - messages=[ - Message(body=b"2", sender=bob.device_id, timestamp=d1, index=2, certificate_index=9), - Message(body=b"3", sender=bob.device_id, timestamp=d2, index=3, certificate_index=9), - ], - ) - - -@pytest.mark.trio -@pytest.mark.postgresql -async def test_message_from_bob_to_alice_multi_backends( - postgresql_url, alice, bob, backend_factory, backend_authenticated_ws_factory -): - d1 = DateTime(2000, 1, 1) - async with backend_factory( - config={"blockstore_config": PostgreSQLBlockStoreConfig(), "db_url": postgresql_url} - ) as backend_1, backend_factory( - populated=False, - config={"blockstore_config": PostgreSQLBlockStoreConfig(), "db_url": postgresql_url}, - ) as backend_2: - await backend_2.message.send( - bob.organization_id, bob.device_id, alice.user_id, d1, b"Hello from Bob !" 
- ) - app1 = app_factory(backend_1) - alice_rpc = AuthenticatedRpcApiClient(app1.test_client(), alice) - async with real_clock_timeout(): - while True: - rep = await message_get(alice_rpc) - assert rep == MessageGetRepOk( - messages=[ - Message( - body=b"Hello from Bob !", - sender=bob.device_id, - timestamp=d1, - index=1, - certificate_index=9, - ) - ], - ) diff --git a/server/tests/backend/test_pki.py b/server/tests/backend/test_pki.py deleted file mode 100644 index acabea70a53..00000000000 --- a/server/tests/backend/test_pki.py +++ /dev/null @@ -1,717 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -import pytest - -from parsec._parsec import ( - ActiveUsersLimit, - BackendEventPkiEnrollmentUpdated, - DateTime, - DeviceCertificate, - DeviceID, - DeviceLabel, - EnrollmentID, - HumanHandle, - PublicKey, - UserCertificate, - UserProfile, - VerifyKey, - anonymous_cmds, - authenticated_cmds, -) -from parsec.api.data import ( - PkiEnrollmentAnswerPayload, - PkiEnrollmentSubmitPayload, - RevokedUserCertificate, -) -from parsec.api.protocol import ( - PkiEnrollmentAcceptRepActiveUsersLimitReached, - PkiEnrollmentAcceptRepAlreadyExists, - PkiEnrollmentAcceptRepInvalidCertification, - PkiEnrollmentAcceptRepNoLongerAvailable, - PkiEnrollmentAcceptRepNotFound, - PkiEnrollmentAcceptRepOk, - PkiEnrollmentInfoRepNotFound, - PkiEnrollmentInfoRepOk, - PkiEnrollmentInfoStatusAccepted, - PkiEnrollmentInfoStatusCancelled, - PkiEnrollmentInfoStatusRejected, - PkiEnrollmentInfoStatusSubmitted, - PkiEnrollmentListRepOk, - PkiEnrollmentRejectRepNoLongerAvailable, - PkiEnrollmentRejectRepNotFound, - PkiEnrollmentRejectRepOk, - PkiEnrollmentSubmitRepAlreadyEnrolled, - PkiEnrollmentSubmitRepAlreadySubmitted, - PkiEnrollmentSubmitRepEmailAlreadyUsed, - PkiEnrollmentSubmitRepIdAlreadyUsed, - PkiEnrollmentSubmitRepOk, - UserRevokeRepOk, -) -from parsec.serde import packb, unpackb -from tests.backend.common import ( - events_listen, - pki_enrollment_accept, - pki_enrollment_info, - pki_enrollment_list, - pki_enrollment_reject, - pki_enrollment_submit, - user_revoke, -) -from tests.common import LocalDevice - -# Helpers - - -def _create_new_user_certificates( - author: LocalDevice, - device_label: DeviceLabel | None, - human_handle: HumanHandle | None, - profile: UserProfile, - public_key: PublicKey, - verify_key: VerifyKey, -) -> tuple[bytes, bytes, bytes, bytes, DeviceID]: - """Helper to prepare the creation of a new user.""" - device_id = DeviceID.new() - timestamp = author.timestamp() - - user_certificate = UserCertificate( - author=author.device_id, - timestamp=timestamp, - user_id=device_id.user_id, - human_handle=human_handle, - public_key=public_key, - profile=profile, - ) - redacted_user_certificate = user_certificate.evolve(human_handle=None) - - device_certificate = DeviceCertificate( - author=author.device_id, - timestamp=timestamp, - device_id=device_id, - device_label=device_label, - verify_key=verify_key, - ) - redacted_device_certificate = device_certificate.evolve(device_label=None) - - user_certificate_bytes = user_certificate.dump_and_sign(author.signing_key) - redacted_user_certificate_bytes = redacted_user_certificate.dump_and_sign(author.signing_key) - device_certificate_bytes = device_certificate.dump_and_sign(author.signing_key) - redacted_device_certificate_bytes = redacted_device_certificate.dump_and_sign( - author.signing_key - ) - - return ( - user_certificate_bytes, - redacted_user_certificate_bytes, - 
device_certificate_bytes, - redacted_device_certificate_bytes, - device_id, - ) - - -async def _submit_request( - anonymous_backend_ws, - bob, - certif=b"", - signature=b"", - request_id=None, - force=False, - certif_email="new_challenger@jointhebattle.com", -): - if not request_id: - request_id = EnrollmentID.new() - payload = PkiEnrollmentSubmitPayload( - verify_key=bob.verify_key, - public_key=bob.public_key, - requested_device_label=bob.device_label, - ).dump() - rep = await pki_enrollment_submit( - anonymous_backend_ws, - enrollment_id=request_id, - force=force, - submitter_der_x509_certificate=certif, - submitter_der_x509_certificate_email=certif_email, - submit_payload_signature=signature, - submit_payload=payload, - ) - assert isinstance(rep, PkiEnrollmentSubmitRepOk) - - -def _prepare_accept_reply(admin, invitee): - ( - user_certificate, - redacted_user_certificate, - device_certificate, - redacted_device_certificate, - user_confirmation_device_id, - ) = _create_new_user_certificates( - admin, - invitee.device_label, - invitee.human_handle, - UserProfile.STANDARD, - admin.public_key, - admin.verify_key, - ) - payload = PkiEnrollmentAnswerPayload( - device_id=invitee.device_id, - device_label=invitee.device_label, - human_handle=invitee.human_handle, - profile=UserProfile.STANDARD, - root_verify_key=admin.root_verify_key, - ).dump() - kwargs = { - "accepter_der_x509_certificate": b"", - "accept_payload_signature": b"", - "accept_payload": payload, - "user_certificate": user_certificate, - "device_certificate": device_certificate, - "redacted_user_certificate": redacted_user_certificate, - "redacted_device_certificate": redacted_device_certificate, - } - - return (user_confirmation_device_id, kwargs) - - -# Test pki_enrollment_submit - - -@pytest.mark.trio -async def test_pki_submit(backend, anonymous_rpc, bob, alice_rpc): - payload = PkiEnrollmentSubmitPayload( - verify_key=bob.verify_key, - public_key=bob.public_key, - requested_device_label=bob.device_label, - ).dump() - - async with events_listen(alice_rpc) as alice_events_listener: - with backend.event_bus.listen() as spy: - rep = await pki_enrollment_submit( - anonymous_rpc, - enrollment_id=EnrollmentID.new(), - force=False, - submitter_der_x509_certificate=b"", - submitter_der_x509_certificate_email="new_challenger@jointhebattle.com", - submit_payload_signature=b"", - submit_payload=payload, - ) - assert isinstance(rep, PkiEnrollmentSubmitRepOk) - await spy.wait_with_timeout(BackendEventPkiEnrollmentUpdated) - - assert ( - await alice_events_listener.do_recv() - == authenticated_cmds.latest.events_listen.RepOk( - authenticated_cmds.latest.events_listen.APIEventPkiEnrollmentUpdated() - ) - ) - - # Retry without force - - with backend.event_bus.listen() as spy: - rep = await pki_enrollment_submit( - anonymous_rpc, - enrollment_id=EnrollmentID.new(), - force=False, - submitter_der_x509_certificate=b"", - submitter_der_x509_certificate_email="new_challenger@jointhebattle.com", - submit_payload_signature=b"", - submit_payload=payload, - ) - - assert isinstance(rep, PkiEnrollmentSubmitRepAlreadySubmitted) - assert rep.submitted_on is not None - assert not spy.events - - # Retry with force - - with backend.event_bus.listen() as spy: - rep = await pki_enrollment_submit( - anonymous_rpc, - enrollment_id=EnrollmentID.new(), - force=True, - submitter_der_x509_certificate=b"", - submitter_der_x509_certificate_email="new_challenger@jointhebattle.com", - submit_payload_signature=b"", - submit_payload=payload, - ) - assert isinstance(rep, 
PkiEnrollmentSubmitRepOk) - await spy.wait_with_timeout(BackendEventPkiEnrollmentUpdated) - assert await apiv2v3_events_listen_nowait(alice_rpc) == ApiV2V3_EventsListenRepOk( - ApiV2V3_APIEventPkiEnrollmentUpdated() - ) - - -@pytest.mark.trio -async def test_pki_submit_same_id(anonymous_backend_ws, bob): - payload = PkiEnrollmentSubmitPayload( - verify_key=bob.verify_key, - public_key=bob.public_key, - requested_device_label=bob.device_label, - ).dump() - enrollment_id = EnrollmentID.new() - - rep = await pki_enrollment_submit( - anonymous_backend_ws, - enrollment_id=enrollment_id, - force=False, - submitter_der_x509_certificate=b"", - submitter_der_x509_certificate_email="new_challenger@jointhebattle.com", - submit_payload_signature=b"", - submit_payload=payload, - ) - assert isinstance(rep, PkiEnrollmentSubmitRepOk) - - # Same enrollment ID without Force - rep = await pki_enrollment_submit( - anonymous_backend_ws, - enrollment_id=enrollment_id, - force=False, - submitter_der_x509_certificate=b"", - submitter_der_x509_certificate_email="new_challenger@jointhebattle.com", - submit_payload_signature=b"", - submit_payload=payload, - ) - assert isinstance(rep, PkiEnrollmentSubmitRepIdAlreadyUsed) - - # Same enrollment ID with Force - rep = await pki_enrollment_submit( - anonymous_backend_ws, - enrollment_id=enrollment_id, - force=True, - submitter_der_x509_certificate=b"", - submitter_der_x509_certificate_email="new_challenger@jointhebattle.com", - submit_payload_signature=b"", - submit_payload=payload, - ) - assert isinstance(rep, PkiEnrollmentSubmitRepIdAlreadyUsed) - - -@pytest.mark.trio -async def test_pki_submit_already_used_email(anonymous_backend_ws, bob): - payload = PkiEnrollmentSubmitPayload( - verify_key=bob.verify_key, - public_key=bob.public_key, - requested_device_label=bob.device_label, - ).dump() - enrollment_id = EnrollmentID.new() - - rep = await pki_enrollment_submit( - anonymous_backend_ws, - enrollment_id=enrollment_id, - force=False, - submitter_der_x509_certificate=b"", - submitter_der_x509_certificate_email=bob.human_handle.email, # bob user with this email already exist - submit_payload_signature=b"", - submit_payload=payload, - ) - assert isinstance(rep, PkiEnrollmentSubmitRepEmailAlreadyUsed) - - -@pytest.mark.xfail(reason="TODO: investigate me !") -@pytest.mark.trio -async def test_pki_submit_no_email_provided(anonymous_rpc, bob): - # Test backend compatibility with core version < 2.8.3 that does not provide an email address field - payload = PkiEnrollmentSubmitPayload( - verify_key=bob.verify_key, - public_key=bob.public_key, - requested_device_label=bob.device_label, - ).dump() - enrollment_id = EnrollmentID.new() - - req = anonymous_cmds.v2.pki_enrollment_submit.Req( - enrollment_id=enrollment_id, - force=False, - submitter_der_x509_certificate=b"", - submitter_der_x509_certificate_email="removed.in@post.processing", - submit_payload_signature=b"", - submit_payload=payload, - ) - req = unpackb(req.dump()) - req.pop("submitter_der_x509_certificate_email") - raw_req = packb(req) - - raw_rep = await anonymous_rpc.send(raw_req, extra_headers={"Api-Version": "2.0"}) - rep = anonymous_cmds.v2.pki_enrollment_submit.Rep.load(raw_rep) - assert isinstance(rep, PkiEnrollmentSubmitRepOk) - - -# Test pki_enrollment_list - - -@pytest.mark.trio -async def test_pki_list(anonymous_backend_ws, bob, adam, alice_ws): - ref_time = DateTime.now() - bob_certif = b"" - bob_request_id = EnrollmentID.new() - bob_certif_signature = b"" - - await _submit_request( - anonymous_backend_ws, 
bob, bob_certif, bob_certif_signature, bob_request_id - ) - - rep = await pki_enrollment_list(alice_ws) - - assert isinstance(rep, PkiEnrollmentListRepOk) - assert len(rep.enrollments) == 1 - - submitted_request = rep.enrollments[0] - assert submitted_request.enrollment_id == bob_request_id - assert submitted_request.submitter_der_x509_certificate == bob_certif - assert submitted_request.submit_payload_signature == bob_certif_signature - # In theory we should have submitted_on > ref_time, but clock resolution on Windows is poor - assert submitted_request.submitted_on >= ref_time - - submitted_payload = PkiEnrollmentSubmitPayload.load(submitted_request.submit_payload) - assert submitted_payload.verify_key == bob.verify_key - assert submitted_payload.public_key == bob.public_key - assert submitted_payload.requested_device_label == bob.device_label - - # Add another user - - await _submit_request( - anonymous_backend_ws, adam, b"", b"", EnrollmentID.new() - ) - rep = await pki_enrollment_list(alice_ws) - - assert isinstance(rep, PkiEnrollmentListRepOk) - assert len(rep.enrollments) == 2 - - -@pytest.mark.trio -async def test_pki_list_empty(alice_ws): - rep = await pki_enrollment_list(alice_ws) - assert isinstance(rep, PkiEnrollmentListRepOk) - assert rep.enrollments == [] - - -# Test pki_enrollment_accept - - -@pytest.mark.trio -async def test_pki_accept(backend, anonymous_backend_ws, mallory, alice, alice_ws): - await apiv2v3_events_subscribe(alice_ws) - - # Assert mallory does not exist - rep = await backend.user.find_humans( - organization_id=alice.organization_id, query=mallory.human_handle.email - ) - assert rep == ([], 0) - - # Create request - request_id = EnrollmentID.new() - await _submit_request(anonymous_backend_ws, mallory, request_id=request_id) - - # Send reply - with backend.event_bus.listen() as spy: - user_confirmation_device_id, kwargs = _prepare_accept_reply(admin=alice, invitee=mallory) - rep = await pki_enrollment_accept(alice_ws, enrollment_id=request_id, **kwargs) - assert isinstance(rep, PkiEnrollmentAcceptRepOk) - await spy.wait_with_timeout(BackendEventPkiEnrollmentUpdated) - assert await apiv2v3_events_listen_nowait(alice_ws) == ApiV2V3_EventsListenRepOk( - ApiV2V3_APIEventPkiEnrollmentUpdated() - ) - - # Assert user has been created - rep = await backend.user.find_humans( - organization_id=alice.organization_id, query=mallory.human_handle.email - ) - assert rep[1] == 1 - rep_human_handle = rep[0][0] - assert not rep_human_handle.revoked - assert rep_human_handle.user_id == user_confirmation_device_id.user_id - assert rep_human_handle.human_handle == mallory.human_handle - - # Send reply twice - _, kwargs = _prepare_accept_reply(admin=alice, invitee=mallory) - with backend.event_bus.listen() as spy: - rep = await pki_enrollment_accept(alice_ws, enrollment_id=request_id, **kwargs) - assert isinstance(rep, PkiEnrollmentAcceptRepNoLongerAvailable) - assert not spy.events - - -@pytest.mark.trio -async def test_pki_accept_not_found(mallory, alice, alice_ws, backend): - request_id = EnrollmentID.new() - - _, kwargs = _prepare_accept_reply(admin=alice, invitee=mallory) - rep = await pki_enrollment_accept(alice_ws, enrollment_id=request_id, **kwargs) - assert isinstance(rep, PkiEnrollmentAcceptRepNotFound) - - rep = await backend.user.find_humans( - organization_id=alice.organization_id, query=mallory.human_handle.email - ) - assert rep == ([], 0) - - -@pytest.mark.trio -async def test_pki_accept_invalid_certificate(mallory, alice, alice_ws, backend): - request_id = 
EnrollmentID.new() - - # Create certificate with mallory user instead of alice - _, kwargs = _prepare_accept_reply(admin=mallory, invitee=mallory) - rep = await pki_enrollment_accept(alice_ws, enrollment_id=request_id, **kwargs) - assert isinstance(rep, PkiEnrollmentAcceptRepInvalidCertification) - - rep = await backend.user.find_humans( - organization_id=alice.organization_id, query=mallory.human_handle.email - ) - assert rep == ([], 0) - - -@pytest.mark.trio -async def test_pki_accept_outdated_submit(anonymous_backend_ws, mallory, alice, alice_ws, backend): - # First request - request_id = EnrollmentID.new() - await _submit_request(anonymous_backend_ws, mallory, request_id=request_id) - # Second request - await _submit_request(anonymous_backend_ws, mallory, force=True) - - _, kwargs = _prepare_accept_reply(admin=alice, invitee=mallory) - rep = await pki_enrollment_accept(alice_ws, enrollment_id=request_id, **kwargs) - assert isinstance(rep, PkiEnrollmentAcceptRepNoLongerAvailable) - - rep = await backend.user.find_humans( - organization_id=alice.organization_id, query=mallory.human_handle.email - ) - assert rep == ([], 0) - - -@pytest.mark.trio -async def test_pki_accept_user_already_exist(anonymous_backend_ws, bob, alice, alice_ws): - request_id = EnrollmentID.new() - await _submit_request(anonymous_backend_ws, bob, request_id=request_id) - - _, kwargs = _prepare_accept_reply(admin=alice, invitee=bob) - rep = await pki_enrollment_accept(alice_ws, enrollment_id=request_id, **kwargs) - assert isinstance(rep, PkiEnrollmentAcceptRepAlreadyExists) - - # Revoke user - now = DateTime.now() - bob_revocation = RevokedUserCertificate( - author=alice.device_id, timestamp=now, user_id=bob.user_id - ).dump_and_sign(alice.signing_key) - - rep = await user_revoke(alice_ws, revoked_user_certificate=bob_revocation) - assert isinstance(rep, UserRevokeRepOk) - - # Accept revoked user - rep = await pki_enrollment_accept(alice_ws, enrollment_id=request_id, **kwargs) - assert isinstance(rep, PkiEnrollmentAcceptRepOk) - - -@pytest.mark.trio -async def test_pki_accept_limit_reached(backend, anonymous_backend_ws, mallory, alice, alice_ws): - # Change organization settings - await backend.organization.update( - alice.organization_id, is_expired=False, active_users_limit=ActiveUsersLimit.LimitedTo(1) - ) - request_id = EnrollmentID.new() - await _submit_request(anonymous_backend_ws, mallory, request_id=request_id) - - _, kwargs = _prepare_accept_reply(admin=alice, invitee=mallory) - rep = await pki_enrollment_accept(alice_ws, enrollment_id=request_id, **kwargs) - - assert isinstance(rep, PkiEnrollmentAcceptRepActiveUsersLimitReached) - - -@pytest.mark.trio -async def test_pki_accept_already_rejected(backend, anonymous_backend_ws, mallory, alice, alice_ws): - request_id = EnrollmentID.new() - await _submit_request(anonymous_backend_ws, mallory, request_id=request_id) - - # Reject - rep = await pki_enrollment_reject(alice_ws, enrollment_id=request_id) - assert isinstance(rep, PkiEnrollmentRejectRepOk) - - _, kwargs = _prepare_accept_reply(admin=alice, invitee=mallory) - rep = await pki_enrollment_accept(alice_ws, enrollment_id=request_id, **kwargs) - assert isinstance(rep, PkiEnrollmentAcceptRepNoLongerAvailable) - - -# TODO: test_pki_accept_limit_expired ?? 
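-
-# Lifecycle recap, as exercised by the tests in this module (informal summary
-# inferred from these tests, not an exhaustive specification):
-#
-#     SUBMITTED --accept-----------> ACCEPTED   (user created; re-submit allowed only after revocation)
-#     SUBMITTED --reject-----------> REJECTED   (later accept/reject gets a NoLongerAvailable answer)
-#     SUBMITTED --submit(force)----> CANCELLED  (superseded by the new submission)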
- -# Test pki_enrollment_reject - - -@pytest.mark.trio -async def test_pki_reject(backend, anonymous_backend_ws, mallory, alice_ws): - await apiv2v3_events_subscribe(alice_ws) - - # Create request - request_id = EnrollmentID.new() - await _submit_request(anonymous_backend_ws, mallory, request_id=request_id) - - with backend.event_bus.listen() as spy: - rep = await pki_enrollment_reject(alice_ws, enrollment_id=request_id) - assert isinstance(rep, PkiEnrollmentRejectRepOk) - await spy.wait_with_timeout(BackendEventPkiEnrollmentUpdated) - assert await apiv2v3_events_listen_nowait(alice_ws) == ApiV2V3_EventsListenRepOk( - ApiV2V3_APIEventPkiEnrollmentUpdated() - ) - - # Reject twice - with backend.event_bus.listen() as spy: - rep = await pki_enrollment_reject(alice_ws, enrollment_id=request_id) - assert isinstance(rep, PkiEnrollmentRejectRepNoLongerAvailable) - assert not spy.events - - -@pytest.mark.trio -async def test_pki_reject_not_found(alice_ws): - request_id = EnrollmentID.new() - - rep = await pki_enrollment_reject(alice_ws, enrollment_id=request_id) - assert isinstance(rep, PkiEnrollmentRejectRepNotFound) - - -@pytest.mark.trio -async def test_pki_reject_already_accepted(anonymous_backend_ws, mallory, alice, alice_ws): - request_id = EnrollmentID.new() - await _submit_request(anonymous_backend_ws, mallory, request_id=request_id) - # Accept request - _, kwargs = _prepare_accept_reply(admin=alice, invitee=mallory) - rep = await pki_enrollment_accept(alice_ws, enrollment_id=request_id, **kwargs) - assert isinstance(rep, PkiEnrollmentAcceptRepOk) - - # Reject accepted request - rep = await pki_enrollment_reject(alice_ws, enrollment_id=request_id) - assert isinstance(rep, PkiEnrollmentRejectRepNoLongerAvailable) - - -@pytest.mark.trio -async def test_pki_submit_already_accepted(anonymous_backend_ws, mallory, alice, alice_ws, backend): - request_id = EnrollmentID.new() - await _submit_request(anonymous_backend_ws, mallory, request_id=request_id) - - user_confirmation_device_id, kwargs = _prepare_accept_reply(admin=alice, invitee=mallory) - rep = await pki_enrollment_accept(alice_ws, enrollment_id=request_id, **kwargs) - assert isinstance(rep, PkiEnrollmentAcceptRepOk) - - # Pki enrollment is accepted and user not revoked - payload = PkiEnrollmentSubmitPayload( - verify_key=mallory.verify_key, - public_key=mallory.public_key, - requested_device_label=mallory.device_label, - ).dump() - rep = await pki_enrollment_submit( - anonymous_backend_ws, - enrollment_id=EnrollmentID.new(), - force=False, - submitter_der_x509_certificate=b"", - submitter_der_x509_certificate_email="new_challenger@jointhebattle.com", - submit_payload_signature=b"", - submit_payload=payload, - ) - assert isinstance(rep, PkiEnrollmentSubmitRepAlreadyEnrolled) - - # Revoke user - now = DateTime.now() - revocation = RevokedUserCertificate( - author=alice.device_id, timestamp=now, user_id=user_confirmation_device_id.user_id - ).dump_and_sign(alice.signing_key) - - rep = await user_revoke(alice_ws, revoked_user_certificate=revocation) - assert isinstance(rep, UserRevokeRepOk) - - # Pki enrollment is accepted and user revoked - rep = await pki_enrollment_submit( - anonymous_backend_ws, - enrollment_id=EnrollmentID.new(), - force=False, - submitter_der_x509_certificate=b"", - submitter_der_x509_certificate_email="new_challenger@jointhebattle.com", - submit_payload_signature=b"", - submit_payload=payload, - ) - - assert isinstance(rep, PkiEnrollmentSubmitRepOk) - - -# Test pki_enrollment_info - - -@pytest.mark.trio -async def 
test_pki_info(anonymous_backend_ws, mallory, alice, alice_ws):
-    request_id = EnrollmentID.new()
-
-    # Request not found
-    rep = await pki_enrollment_info(anonymous_backend_ws, request_id)
-    assert isinstance(rep, PkiEnrollmentInfoRepNotFound)
-
-    # Request submitted
-    await _submit_request(anonymous_backend_ws, mallory, request_id=request_id)
-    rep = await pki_enrollment_info(anonymous_backend_ws, request_id)
-    assert isinstance(rep, PkiEnrollmentInfoRepOk)
-    assert isinstance(rep.unit, PkiEnrollmentInfoStatusSubmitted)
-
-    # Request cancelled
-    new_request_id = EnrollmentID.new()
-    await _submit_request(anonymous_backend_ws, mallory, request_id=new_request_id, force=True)
-    rep = await pki_enrollment_info(anonymous_backend_ws, request_id)
-    assert isinstance(rep, PkiEnrollmentInfoRepOk)
-    assert isinstance(rep.unit, PkiEnrollmentInfoStatusCancelled)
-
-
-@pytest.mark.trio
-async def test_pki_info_accepted(anonymous_backend_ws, mallory, alice, alice_ws):
-    request_id = EnrollmentID.new()
-    await _submit_request(anonymous_backend_ws, mallory, request_id=request_id)
-
-    _, kwargs = _prepare_accept_reply(admin=alice, invitee=mallory)
-    rep = await pki_enrollment_accept(alice_ws, enrollment_id=request_id, **kwargs)
-    assert isinstance(rep, PkiEnrollmentAcceptRepOk)
-
-    rep = await pki_enrollment_info(anonymous_backend_ws, request_id)
-    assert isinstance(rep, PkiEnrollmentInfoRepOk)
-    assert isinstance(rep.unit, PkiEnrollmentInfoStatusAccepted)
-
-
-@pytest.mark.trio
-async def test_pki_info_rejected(anonymous_backend_ws, mallory, alice_ws):
-    request_id = EnrollmentID.new()
-    await _submit_request(anonymous_backend_ws, mallory, request_id=request_id)
-
-    rep = await pki_enrollment_reject(alice_ws, enrollment_id=request_id)
-    assert isinstance(rep, PkiEnrollmentRejectRepOk)
-
-    rep = await pki_enrollment_info(anonymous_backend_ws, request_id)
-    assert isinstance(rep, PkiEnrollmentInfoRepOk)
-    assert isinstance(rep.unit, PkiEnrollmentInfoStatusRejected)
-
-
-@pytest.mark.trio
-async def test_pki_complete_sequence(anonymous_backend_ws, mallory, alice_ws, alice):
-    async def _cancel():
-        await _submit_request(anonymous_backend_ws, mallory, force=True)
-        # Create more than one cancelled request
-        await _submit_request(anonymous_backend_ws, mallory, force=True)
-
-    async def _reject():
-        request_id = EnrollmentID.new()
-        await _submit_request(anonymous_backend_ws, mallory, request_id=request_id, force=True)
-        rep = await pki_enrollment_reject(alice_ws, enrollment_id=request_id)
-        assert isinstance(rep, PkiEnrollmentRejectRepOk)
-
-    async def _accept():
-        request_id = EnrollmentID.new()
-        await _submit_request(anonymous_backend_ws, mallory, request_id=request_id, force=True)
-        user_confirmation_device_id, kwargs = _prepare_accept_reply(admin=alice, invitee=mallory)
-        rep = await pki_enrollment_accept(alice_ws, enrollment_id=request_id, **kwargs)
-        assert isinstance(rep, PkiEnrollmentAcceptRepOk)
-        return user_confirmation_device_id.user_id
-
-    async def _revoke(user_id):
-        now = DateTime.now()
-        revocation = RevokedUserCertificate(
-            author=alice.device_id, timestamp=now, user_id=user_id
-        ).dump_and_sign(alice.signing_key)
-
-        rep = await user_revoke(alice_ws, revoked_user_certificate=revocation)
-        assert isinstance(rep, UserRevokeRepOk)
-
-    for _ in range(2):
-        await _cancel()
-        await _reject()
-        await _cancel()
-        user_id = await _accept()
-        await _revoke(user_id)
diff --git a/server/tests/backend/test_postgres_concurrency.py b/server/tests/backend/test_postgres_concurrency.py
deleted file mode 100644
index 70a7b7085ef..00000000000
--- a/server/tests/backend/test_postgres_concurrency.py
+++ /dev/null
@@ -1,278 +0,0 @@
-# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS
-from __future__ import annotations
-
-from contextlib import contextmanager
-from unittest.mock import patch
-
-import pytest
-import trio
-import triopg
-
-from parsec._parsec import ActiveUsersLimit, DateTime, EnrollmentID
-from parsec.backend.organization import OrganizationAlreadyBootstrappedError
-from parsec.backend.pki import PkiEnrollmentNoLongerAvailableError
-from parsec.backend.user import UserActiveUsersLimitReached, UserAlreadyExistsError
-from tests.common import local_device_to_backend_user
-
-# Testing concurrency interactions is hard given it involves precise timing
-# (otherwise the test only appears to be concurrent, but the queries are in fact
-# executed one after another...)
-# The solution we choose here is to send multiple concurrent queries and
-# try to ensure they reach the sensitive part roughly at the same
-# time.
-# Of course this is far from perfect (and produces non-reproducible errors...)
-# but better than nothing ;-)
-
-
-@contextmanager
-def ensure_pg_transaction_concurrency_barrier(concurrency: int = 2):
-    # In theory we would want to plug into all triopg connection methods that
-    # can send requests to PostgreSQL (so execute, fetch etc.), but this is
-    # cumbersome given triopg is a dynamic wrapper over asyncpg...
-    # Instead we consider that a transaction block is always opened for
-    # operations that involve concurrency issues. This seems like a reasonable
-    # bet given a single INSERT/UPDATE without a transaction is atomic all by
-    # itself (including the SELECT inlined in the query).
-
-    from triopg._triopg import TrioTransactionProxy
-
-    current_concurrency = 0
-    concurrency_reached = trio.Event()
-
-    class PatchedTrioTransactionProxy(TrioTransactionProxy):
-        async def __aenter__(self, *args, **kwargs):
-            nonlocal current_concurrency
-            current_concurrency += 1
-            if current_concurrency >= concurrency:
-                concurrency_reached.set()
-            await concurrency_reached.wait()
-
-            return await super().__aenter__(*args, **kwargs)
-
-    with patch("triopg._triopg.TrioTransactionProxy", PatchedTrioTransactionProxy):
-        yield
-
-    assert current_concurrency >= concurrency  # Sanity check
-
-
-@pytest.mark.trio
-@pytest.mark.postgresql
-async def test_concurrency_bootstrap_organization(postgresql_url, backend_factory, coolorg, alice):
-    results = []
-
-    backend_user, backend_first_device = local_device_to_backend_user(alice, coolorg)
-
-    async def _concurrent_bootstrap(backend):
-        try:
-            await backend.organization.bootstrap(
-                id=coolorg.organization_id,
-                user=backend_user,
-                first_device=backend_first_device,
-                bootstrap_token=coolorg.bootstrap_token,
-                root_verify_key=coolorg.root_verify_key,
-            )
-            results.append(None)
-
-        except Exception as exc:
-            results.append(exc)
-
-    async with backend_factory(
-        config={"db_url": postgresql_url, "db_max_connections": 10}, populated=False
-    ) as backend:
-        # Create the organization
-        await backend.organization.create(
-            id=coolorg.organization_id, bootstrap_token=coolorg.bootstrap_token
-        )
-
-        # Concurrent bootstrap
-        with ensure_pg_transaction_concurrency_barrier(concurrency=10):
-            async with trio.open_nursery() as nursery:
-                for _ in range(10):
-                    nursery.start_soon(_concurrent_bootstrap, backend)
-
-        assert len(results) == 10
-        assert len([r for r in results if isinstance(r, OrganizationAlreadyBootstrappedError)]) == 9
- - -@pytest.mark.trio -@pytest.mark.postgresql -async def test_concurrency_bootstrap_organization(postgresql_url, backend_factory, coolorg, alice): - results = [] - - backend_user, backend_first_device = local_device_to_backend_user(alice, coolorg) - - async def _concurrent_bootstrap(backend): - try: - await backend.organization.bootstrap( - id=coolorg.organization_id, - user=backend_user, - first_device=backend_first_device, - bootstrap_token=coolorg.bootstrap_token, - root_verify_key=coolorg.root_verify_key, - ) - results.append(None) - - except Exception as exc: - results.append(exc) - - async with backend_factory( - config={"db_url": postgresql_url, "db_max_connections": 10}, populated=False - ) as backend: - # Create the organization - await backend.organization.create( - id=coolorg.organization_id, bootstrap_token=coolorg.bootstrap_token - ) - - # Concurrent bootstrap - with ensure_pg_transaction_concurrency_barrier(concurrency=10): - async with trio.open_nursery() as nursery: - for _ in range(10): - nursery.start_soon(_concurrent_bootstrap, backend) - - assert len(results) == 10 - assert len([r for r in results if isinstance(r, OrganizationAlreadyBootstrappedError)]) == 9 - - async with triopg.connect(postgresql_url) as conn: - res = await conn.fetchrow("SELECT count(*) FROM organization") - assert res["count"] == 1 - res = await conn.fetchrow("SELECT count(*) FROM user_") - assert res["count"] == 1 - res = await conn.fetchrow("SELECT count(*) FROM device") - assert res["count"] == 1 - - -@pytest.mark.trio -@pytest.mark.postgresql -async def test_concurrency_create_user( - postgresql_url, backend_factory, backend_data_binder_factory, coolorg, alice, bob -): - results = [] - - backend_user, backend_first_device = local_device_to_backend_user(bob, alice) - - async def _concurrent_create(backend): - try: - await backend.user.create_user( - coolorg.organization_id, backend_user, backend_first_device - ) - results.append(None) - - except Exception as exc: - results.append(exc) - - async with backend_factory( - config={"db_url": postgresql_url, "db_max_connections": 10}, populated=False - ) as backend: - # Create&bootstrap the organization - binder = backend_data_binder_factory(backend) - await binder.bind_organization(coolorg, alice) - - # Concurrent user creation - with ensure_pg_transaction_concurrency_barrier(concurrency=10): - async with trio.open_nursery() as nursery: - for _ in range(10): - nursery.start_soon(_concurrent_create, backend) - - assert len(results) == 10 - assert len([r for r in results if isinstance(r, UserAlreadyExistsError)]) == 9 - - async with triopg.connect(postgresql_url) as conn: - res = await conn.fetchrow("SELECT count(*) FROM organization") - assert res["count"] == 1 - res = await conn.fetchrow("SELECT count(*) FROM user_ WHERE user_id = 'bob'") - assert res["count"] == 1 - res = await conn.fetchrow( - "SELECT count(*) FROM device WHERE user_ = (SELECT _id FROM user_ WHERE user_id = 'bob')" - ) - assert res["count"] == 1 - - -@pytest.mark.trio -@pytest.mark.postgresql -async def test_concurrency_create_user_with_limit_reached( - postgresql_url, - backend_factory, - backend_data_binder_factory, - coolorg, - alice, - local_device_factory, -): - results = [] - - async def _concurrent_create(backend, user): - backend_user, backend_first_device = local_device_to_backend_user(user, alice) - try: - await backend.user.create_user( - coolorg.organization_id, backend_user, backend_first_device - ) - results.append(None) - - except Exception as exc: - results.append(exc) - - async with backend_factory( - config={"db_url": postgresql_url, "db_max_connections": 10}, populated=False - ) as backend: - # Create&bootstrap the organization - binder = backend_data_binder_factory(backend) - await binder.bind_organization(coolorg, alice) - - # Set a limit that will soon be reached - await backend.organization.update( - alice.organization_id, active_users_limit=ActiveUsersLimit.LimitedTo(3) - ) - - # Concurrent user creation - with ensure_pg_transaction_concurrency_barrier(concurrency=10): - async with trio.open_nursery() as nursery: - for _ in range(10): - nursery.start_soon( - _concurrent_create, backend, local_device_factory(org=coolorg) - ) - - assert len(results) == 10 - assert len([r for r in results if isinstance(r, UserActiveUsersLimitReached)]) == 8 - assert len([r for r in results if r is None]) == 2 - - async with triopg.connect(postgresql_url) as conn: - res = await conn.fetchrow("SELECT count(*) FROM organization") - assert res["count"] == 1 - res = await conn.fetchrow("SELECT count(*) FROM user_") - assert res["count"] == 3 - res = await conn.fetchrow("SELECT count(*) FROM device") - assert res["count"] == 3
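The 8/2 split asserted above is not arbitrary: ActiveUsersLimit.LimitedTo(3) counts every active user, and alice already occupies one slot, so only two of the ten concurrent creations can succeed. A back-of-the-envelope check in plain Python, just to make the arithmetic explicit:

limit, existing, attempts = 3, 1, 10
successes = min(attempts, limit - existing)
failures = attempts - successes
assert (successes, failures) == (2, 8)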
- - -@pytest.mark.trio -@pytest.mark.postgresql -async def test_concurrency_pki_enrollment_accept( - postgresql_url, backend_factory, backend_data_binder_factory, coolorg, alice, bob -): - results = [] - enrollment_id = EnrollmentID.new() - - backend_user, backend_first_device = local_device_to_backend_user(bob, alice) - - async def _concurrent_enrollment_accept(backend): - try: - await backend.pki.accept( - organization_id=coolorg.organization_id, - enrollment_id=enrollment_id, - accepter_der_x509_certificate=b"whatever", - accept_payload_signature=b"whatever", - accept_payload=b"whatever", - accepted_on=DateTime.now(), - user=backend_user, - first_device=backend_first_device, - ) - results.append(None) - - except AssertionError: - # Improve pytest --pdb behavior - raise - - except Exception as exc: - results.append(exc) - - async with backend_factory( - config={"db_url": postgresql_url, "db_max_connections": 10}, populated=False - ) as backend: - # Create&bootstrap the organization - binder = backend_data_binder_factory(backend) - await binder.bind_organization(coolorg, alice) - - # Create the PKI enrollment - await backend.pki.submit( - organization_id=coolorg.organization_id, - enrollment_id=enrollment_id, - force=False, - submitter_der_x509_certificate=b"whatever", - submitter_der_x509_certificate_email="whatever", - submit_payload_signature=b"whatever", - submit_payload=b"whatever", - submitted_on=DateTime.now(), - ) - - # Concurrent PKI enrollment accept - with ensure_pg_transaction_concurrency_barrier(concurrency=10): - async with trio.open_nursery() as nursery: - for _ in range(10): - nursery.start_soon(_concurrent_enrollment_accept, backend) - - assert len(results) == 10 - assert len([r for r in results if isinstance(r, PkiEnrollmentNoLongerAvailableError)]) == 9 - - async with triopg.connect(postgresql_url) as conn: - res = await conn.fetchrow("SELECT count(*) FROM organization") - assert res["count"] == 1 - res = await conn.fetchrow("SELECT count(*) FROM user_ WHERE user_id = 'bob'") - assert res["count"] == 1 - res = await conn.fetchrow( - "SELECT count(*) FROM device WHERE user_ = (SELECT _id FROM user_ WHERE user_id = 'bob')" - ) - assert res["count"] == 1 - res = await conn.fetchrow("SELECT count(*) FROM pki_enrollment") - assert res["count"] == 1 - res = await conn.fetchrow("SELECT enrollment_state FROM pki_enrollment") - assert res["enrollment_state"] == "ACCEPTED" diff --git a/server/tests/backend/test_postgres_db.py b/server/tests/backend/test_postgres_db.py deleted file mode 100644 index 8649de3117b..00000000000 --- a/server/tests/backend/test_postgres_db.py +++ /dev/null @@ -1,357 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -import logging -import sys -from asyncio import InvalidStateError -from datetime import datetime, timezone -from uuid import uuid4 - -import pytest -import trio -import triopg - -from parsec._parsec import ActiveUsersLimit, DateTime, VlobID -from parsec.backend.cli.run import RetryPolicy, _run_backend -from parsec.backend.config import BackendConfig, PostgreSQLBlockStoreConfig -from parsec.backend.postgresql.handler import handle_datetime, handle_integer, handle_uuid -from tests.common import real_clock_timeout - - -async def wait_for_listeners(conn, to_terminate=False): - async with real_clock_timeout(): - while True: - rows = await conn.fetch( - "SELECT pid FROM pg_stat_activity WHERE query ILIKE 'listen %' AND state ILIKE 'idle'" - ) - if (not to_terminate and rows) or (to_terminate and
not rows): - return [r["pid"] for r in rows] - - -@pytest.mark.trio -@pytest.mark.postgresql -async def test_postgresql_connection_ok(postgresql_url, backend_factory): - async with backend_factory(config={"db_url": postgresql_url}): - pass - - -@pytest.mark.trio -@pytest.mark.postgresql -async def test_postgresql_notification_listener_terminated(postgresql_url, backend_factory): - async with triopg.connect(postgresql_url) as conn: - with pytest.raises(ConnectionError): - async with backend_factory(config={"db_url": postgresql_url}): - (pid,) = await wait_for_listeners(conn) - (value,) = await conn.fetchrow("SELECT pg_terminate_backend($1)", pid) - assert value - # Wait to get cancelled by the backend app - async with real_clock_timeout(): - await trio.sleep_forever() - - -@pytest.mark.trio -@pytest.mark.postgresql -async def test_postgresql_connection_not_ok(postgresql_url, backend_factory, unused_tcp_port): - postgresql_url = f"postgresql://127.0.0.1:{unused_tcp_port}/dummy" - with pytest.raises(OSError) as exc: - async with backend_factory(config={"db_url": postgresql_url}): - pass - if sys.platform == "darwin": - errno = 61 - else: - errno = 111 - assert f"[Errno {errno}] Connect call failed" in str(exc.value) - - -@pytest.mark.trio -@pytest.mark.postgresql -async def test_retry_policy_no_retry(postgresql_url, asyncio_loop): - app_config = BackendConfig( - administration_token="s3cr3t", - db_min_connections=1, - db_max_connections=5, - sse_keepalive=30, - debug=False, - blockstore_config=PostgreSQLBlockStoreConfig(), - email_config=None, - backend_addr=None, - forward_proto_enforce_https=None, - organization_spontaneous_bootstrap=False, - organization_bootstrap_webhook_url=None, - db_url=postgresql_url, - ) - - # No retry - retry_policy = RetryPolicy(maximum_attempts=0, pause_before_retry=0) - - # Expect a connection error - with pytest.raises(ConnectionError): - async with trio.open_nursery() as nursery: - # Run backend in the background - nursery.start_soon( - lambda: _run_backend( - host="127.0.0.1", - port=0, - ssl_certfile=None, - ssl_keyfile=None, - retry_policy=retry_policy, - app_config=app_config, - ) - ) - # Connect to PostgreSQL database - async with triopg.connect(postgresql_url) as conn: - # Wait for the backend to be connected - (pid,) = await wait_for_listeners(conn) - # Terminate the backend listener connection - (value,) = await conn.fetchrow("SELECT pg_terminate_backend($1)", pid) - assert value - # Wait to get cancelled by the connection error `_run_backend` - async with real_clock_timeout(): - await trio.sleep_forever() - - -@pytest.mark.trio -@pytest.mark.postgresql -async def test_retry_policy_allow_retry(postgresql_url, asyncio_loop, caplog): - app_config = BackendConfig( - administration_token="s3cr3t", - db_min_connections=1, - db_max_connections=5, - sse_keepalive=30, - debug=False, - blockstore_config=PostgreSQLBlockStoreConfig(), - email_config=None, - backend_addr=None, - forward_proto_enforce_https=None, - organization_spontaneous_bootstrap=False, - organization_bootstrap_webhook_url=None, - db_url=postgresql_url, - ) - # Allow to retry once - retry_policy = RetryPolicy(maximum_attempts=1, pause_before_retry=0) - async with trio.open_nursery() as nursery: - # Run backend in the background - nursery.start_soon( - lambda: _run_backend( - host="127.0.0.1", - port=0, - ssl_certfile=None, - ssl_keyfile=None, - retry_policy=retry_policy, - app_config=app_config, - ) - ) - # Connect to PostgreSQL database - async with triopg.connect(postgresql_url) as conn: - # 
Test for 10 cycles - pid = None - for _ in range(10): - # Wait for the backend to be connected - (new_pid,) = await wait_for_listeners(conn) - # Make sure a new connection has been created - assert new_pid != pid - pid = new_pid - # Terminate the backend listener connection - (value,) = await conn.fetchrow("SELECT pg_terminate_backend($1)", pid) - assert value - # Wait for the listener to terminate - await wait_for_listeners(conn, to_terminate=True) - - # Cancel the backend nursery - nursery.cancel_scope.cancel() - - # Ignore error logs that look like: - # *** asyncio.exceptions.InvalidStateError: invalid state - # Traceback (most recent call last): - # File "asyncio/base_events.py", line 1779, in call_exception_handler - # self.default_exception_handler(context) - # File "site-packages/trio_asyncio/_async.py", line 44, in default_exception_handler - # raise exception - # File "asyncio/selector_events.py", line 868, in _read_ready__data_received - # self._protocol.data_received(data) - # File "site-packages/asyncpg/connect_utils.py", line 674, in data_received - # self.on_data.set_result(False) - # Or like this: - # *** ConnectionError: unexpected connection_lost() call - # Traceback (most recent call last): - # File "asyncio/base_events.py", line 1779, in call_exception_handler - # self.default_exception_handler(context) - # File "site-packages/trio_asyncio/_async.py", line 44, in default_exception_handler - # raise exception - # Those happen in about 14% and 5% of the runs, respectively. - # TODO: Investigate - for record in caplog.get_records("call"): - if record.levelno < logging.ERROR or record.name != "asyncio": - continue - try: - _, exc, _ = record.exc_info - except ValueError: - continue - if isinstance(exc, (ConnectionError, InvalidStateError)): - try: - caplog.asserted_records.add(record) - except AttributeError: - caplog.asserted_records = {record} - - -@pytest.mark.trio -@pytest.mark.postgresql -async def test_active_users_limit_correctly_serialized(postgresql_url, backend_factory): - user_limit_py = 2 - user_limit_rs = ActiveUsersLimit.LimitedTo(2) - - async with triopg.connect(postgresql_url) as vanilla_conn: - async with triopg.connect(postgresql_url) as patched_conn: - await handle_integer(patched_conn) - - await vanilla_conn.execute( - f""" - DROP TABLE IF EXISTS active_users_limit; - CREATE TABLE IF NOT EXISTS active_users_limit ( - _id SERIAL PRIMARY KEY, - user_limit integer - )""" - ) - - # Insert ActiveUsersLimit - await vanilla_conn.execute( - "INSERT INTO active_users_limit (_id, user_limit) VALUES (0, $1)", - user_limit_py, - ) - await patched_conn.execute( - "INSERT INTO active_users_limit (_id, user_limit) VALUES (1, $1)", - user_limit_rs, - ) - - # Retrieve ActiveUsersLimit inserted by vanilla - from_vanilla_to_py = await vanilla_conn.fetchval( - "SELECT user_limit FROM active_users_limit WHERE _id = 0" - ) - # Retrieve ActiveUsersLimit inserted by patched - from_patched_to_py = await vanilla_conn.fetchval( - "SELECT user_limit FROM active_users_limit WHERE _id = 1" - ) - # Retrieve ActiveUsersLimit inserted by vanilla - from_vanilla_to_rs = await patched_conn.fetchval( - "SELECT user_limit FROM active_users_limit WHERE _id = 0" - ) - # Retrieve ActiveUsersLimit inserted by patched - from_patched_to_rs = await patched_conn.fetchval( - "SELECT user_limit FROM active_users_limit WHERE _id = 1" - ) - - assert from_vanilla_to_py == from_patched_to_py - assert from_vanilla_to_rs == from_patched_to_rs - - -@pytest.mark.trio -@pytest.mark.postgresql -async def
test_rust_datetime_correctly_serialized(postgresql_url, backend_factory): - now_py = datetime.now(timezone.utc) - now_rs = DateTime.from_timestamp(now_py.timestamp()) - - async with triopg.connect(postgresql_url) as vanilla_conn: - async with triopg.connect(postgresql_url) as patched_conn: - await handle_datetime(patched_conn) - - await vanilla_conn.execute( - f""" - DROP TABLE IF EXISTS datetime; - CREATE TABLE IF NOT EXISTS datetime ( - _id SERIAL PRIMARY KEY, - timestamp TIMESTAMPTZ - )""" - ) - - # Insert DateTime - await vanilla_conn.execute( - "INSERT INTO datetime (_id, timestamp) VALUES (0, $1)", - now_py, - ) - await patched_conn.execute( - "INSERT INTO datetime (_id, timestamp) VALUES (1, $1)", - now_rs, - ) - - # Retrieve datetime inserted by vanilla - from_vanilla_to_py = await vanilla_conn.fetchval( - "SELECT timestamp FROM datetime WHERE _id = 0" - ) - # Retrieve datetime inserted by patched - from_patched_to_py = await vanilla_conn.fetchval( - "SELECT timestamp FROM datetime WHERE _id = 1" - ) - # Retrieve Datetime inserted by vanilla - from_vanilla_to_rs = await patched_conn.fetchval( - "SELECT timestamp FROM datetime WHERE _id = 0" - ) - # Retrieve Datetime inserted by patched - from_patched_to_rs = await patched_conn.fetchval( - "SELECT timestamp FROM datetime WHERE _id = 1" - ) - - assert from_vanilla_to_py == from_patched_to_py - assert from_vanilla_to_rs == from_patched_to_rs - assert ( - from_vanilla_to_py.timestamp() - == from_patched_to_py.timestamp() - == from_vanilla_to_rs.timestamp() - == from_patched_to_rs.timestamp() - == now_py.timestamp() - == now_rs.timestamp() - ) - - -@pytest.mark.trio -@pytest.mark.postgresql -async def test_rust_uuid_correctly_serialized(postgresql_url, backend_factory): - id_py = uuid4() - id_rs = VlobID.from_hex(id_py.hex) - - async with triopg.connect(postgresql_url) as vanilla_conn: - async with triopg.connect(postgresql_url) as patched_conn: - await handle_uuid(patched_conn) - - await vanilla_conn.execute( - f""" - DROP TABLE IF EXISTS uuid; - CREATE TABLE IF NOT EXISTS uuid ( - _id SERIAL PRIMARY KEY, - id UUID - )""" - ) - - # Insert UUID - await vanilla_conn.execute( - "INSERT INTO uuid (_id, id) VALUES (0, $1)", - id_py, - ) - await patched_conn.execute( - "INSERT INTO uuid (_id, id) VALUES (1, $1)", - id_rs, - ) - - # Retrieve uuid inserted by vanilla - from_vanilla_to_py = await vanilla_conn.fetchval("SELECT id FROM uuid WHERE _id = 0") - # Retrieve uuid inserted by patched - from_patched_to_py = await vanilla_conn.fetchval("SELECT id FROM uuid WHERE _id = 1") - # Retrieve hex inserted by vanilla - from_vanilla_to_rs = await patched_conn.fetchval("SELECT id FROM uuid WHERE _id = 0") - # Retrieve hex inserted by patched - from_patched_to_rs = await patched_conn.fetchval("SELECT id FROM uuid WHERE _id = 1") - - # Rebuild VlobID from the raw hex: the patched deserializer returns plain - # hex because it cannot know which concrete ID type to instantiate - from_vanilla_to_rs = VlobID.from_hex(from_vanilla_to_rs) - from_patched_to_rs = VlobID.from_hex(from_patched_to_rs) - - assert from_vanilla_to_py == from_patched_to_py - assert from_vanilla_to_rs == from_patched_to_rs - assert ( - from_vanilla_to_py.hex - == from_patched_to_py.hex - == from_vanilla_to_rs.hex - == from_patched_to_rs.hex - == id_py.hex - == id_rs.hex - )
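The handle_* helpers patch how a connection maps PostgreSQL types to Python values; asyncpg's standard hook for this is Connection.set_type_codec, which is presumably what they build on. A standalone sketch of the mechanism with a toy codec (illustrative only, not Parsec's actual handler code; the DSN is a placeholder):

import asyncio

import asyncpg

async def main() -> None:
    conn = await asyncpg.connect("postgresql://localhost/postgres")  # placeholder DSN
    # Override the built-in uuid codec: the encoder runs on every bound
    # parameter of that type, the decoder on every fetched value. A real
    # handler would build a parsec ID object here instead of keeping the hex.
    await conn.set_type_codec(
        "uuid",
        encoder=str,           # Python value -> wire text
        decoder=lambda s: s,   # wire text -> Python value (toy: plain hex string)
        schema="pg_catalog",
        format="text",
    )
    print(await conn.fetchval("SELECT '0694a211-7635-4e82-95e2-8a543e5887f9'::uuid"))
    await conn.close()

asyncio.run(main())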
diff --git a/server/tests/backend/test_rpc_handshake.py b/server/tests/backend/test_rpc_handshake.py deleted file mode 100644 index f0063595938..00000000000 --- a/server/tests/backend/test_rpc_handshake.py +++ /dev/null @@ -1,371 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -import logging -from base64 import b64encode -from unittest.mock import patch - -import pytest - -from parsec._parsec import ApiVersion, DateTime, DeviceID, anonymous_cmds -from parsec.backend import BackendApp -from parsec.serde import packb -from tests.common import AnonymousRpcApiClient, AuthenticatedRpcApiClient, LocalDevice -from tests.common.rpc_api import InvitedRpcApiClient - -PING_RAW_REQ = packb({"cmd": "ping", "ping": "foo"}) - - -async def _test_good_handshake( - client: AuthenticatedRpcApiClient | AnonymousRpcApiClient | InvitedRpcApiClient, -): - # Sanity check: make sure base query is valid - rep = await client.send(PING_RAW_REQ, check_rep=False) - assert rep.status_code == 200 - assert rep.headers["Api-Version"] == str(ApiVersion.API_LATEST_VERSION) - - -async def _test_handshake_bad_organization( - client: AuthenticatedRpcApiClient | AnonymousRpcApiClient | InvitedRpcApiClient, -): - for bad_org in [ - "dummy", # Unknown organization - "a" * 65, # Invalid organization ID - ]: - - def _before_send_hook(args): - args["path"] = args["path"].replace("CoolOrg", bad_org) - - rep = await client.send( - PING_RAW_REQ, - before_send_hook=_before_send_hook, - check_rep=False, - ) - assert rep.status_code == 404 - assert rep.headers["Api-Version"] == str(ApiVersion.API_LATEST_VERSION) - - -async def _test_handshake_api_version_header( - client: AuthenticatedRpcApiClient | AnonymousRpcApiClient | InvitedRpcApiClient, -): - server_versions = [ApiVersion(2, 1), ApiVersion(3, 1), ApiVersion(4, 1)] - cmds_load_fn = {i: anonymous_cmds.latest.AnyCmdReq.load for i in (2, 3, 4)} - with ( - patch("parsec.backend.asgi.rpc.SUPPORTED_API_VERSIONS", server_versions), - patch("parsec.backend.asgi.rpc.AUTHENTICATED_CMDS_LOAD_FN", cmds_load_fn), - patch("parsec.backend.asgi.rpc.INVITED_CMDS_LOAD_FN", cmds_load_fn), - patch("parsec.backend.asgi.rpc.ANONYMOUS_CMDS_LOAD_FN", cmds_load_fn), - ): - # Plain invalid header value - rep = await client.send( - PING_RAW_REQ, - extra_headers={"Api-Version": "dummy"}, - check_rep=False, - ) - assert rep.status_code == 422 - assert rep.headers["Supported-Api-Versions"] == "2.1;3.1;4.1" - - # Missing header, fallback to default value for backward compatibility - rep = await client.send( - PING_RAW_REQ, - extra_headers={"Api-Version": None}, - check_rep=False, - ) - # Special case for anonymous - assert rep.status_code == 200 - assert rep.headers["Api-Version"] == "3.0" - - # Client provides an incompatible api version - unknown_major = ApiVersion(version=5, revision=0) - too_old_major = ApiVersion(version=1, revision=0) - for bad_version in (too_old_major, unknown_major): - rep = await client.send( - PING_RAW_REQ, - extra_headers={"Api-Version": str(bad_version)}, - check_rep=False, - ) - assert rep.status_code == 422 - assert rep.headers["Supported-Api-Versions"] == "2.1;3.1;4.1" - - # Client provides a compatible api version - unknown_minor = ApiVersion(version=4, revision=2) - rep = await client.send( - PING_RAW_REQ, - extra_headers={"Api-Version": str(unknown_minor)}, - check_rep=False, - ) - assert rep.status_code == 200 - assert rep.headers["Api-Version"] == "4.1"
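The 422/200 expectations above follow one rule: client and server must share a major version, and the server then answers with its own highest revision for that major. A sketch of that negotiation rule (standalone illustration, not the backend's actual code):

def negotiate(client: tuple, supported: list) -> "tuple | None":
    # Same major number means compatible; pick the server's newest revision.
    candidates = [v for v in supported if v[0] == client[0]]
    return max(candidates) if candidates else None

SERVER = [(2, 1), (3, 1), (4, 1)]
assert negotiate((4, 2), SERVER) == (4, 1)  # unknown minor: accepted, answers 4.1
assert negotiate((5, 0), SERVER) is None    # unknown major: 422
assert negotiate((1, 0), SERVER) is None    # too old major: 422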
- - -async def _test_handshake_content_type_header( - client: AuthenticatedRpcApiClient | AnonymousRpcApiClient | InvitedRpcApiClient, -): - # Bad header value - rep = await client.send( - PING_RAW_REQ, - extra_headers={"Content-Type": "application/json"}, - check_rep=False, - ) - assert rep.status_code == 415 - assert rep.headers["Api-Version"] == str(ApiVersion.API_LATEST_VERSION) - - # Missing header - rep = await client.send( - PING_RAW_REQ, - extra_headers={"Content-Type": None}, - check_rep=False, - ) - assert rep.status_code == 415 - assert rep.headers["Api-Version"] == str(ApiVersion.API_LATEST_VERSION) - - # Incorrect header that was used in Parsec <= 2.11.1 - rep = await client.send( - PING_RAW_REQ, - extra_headers={"Content-Type": "application/x-www-form-urlencoded"}, - check_rep=False, - ) - assert rep.status_code == 200 - assert rep.headers["Content-Type"] == "application/msgpack" - - -async def _test_authenticated_handshake_bad_signature_header( - client: AuthenticatedRpcApiClient, alice: LocalDevice, bob: LocalDevice -): - # First test missing & plain bad headers - for expected_status_code, extra_headers in [ - (401, {"Signature": None}), # Missing Signature header - (401, {"Signature": "dummy"}), # Bad Signature header - (401, {"Signature": b64encode(b"dummy")}), # Base64 as expected but still bad signature - (401, {"Authorization": None}), # Missing Authorization header - (401, {"Authorization": "PARSEC-SIGN-RSA-4096"}), # Bad Authorization header - (401, {"Author": None}), # Missing Author header - (401, {"Author": "dummy"}), # Bad Author header - ]: - rep = await client.send( - PING_RAW_REQ, - extra_headers=extra_headers, - check_rep=False, - ) - assert rep.status_code == expected_status_code, (rep, extra_headers) - assert rep.headers["Api-Version"] == str(ApiVersion.API_LATEST_VERSION) - - # Valid signature format, but bad signing key - def _before_send_hook(args): - signature = bob.signing_key.sign_only_signature(args["data"]) - args["headers"]["signature"] = b64encode(signature).decode("ascii") - - rep = await client.send( - PING_RAW_REQ, - before_send_hook=_before_send_hook, - check_rep=False, - ) - assert rep.status_code == 401 - assert rep.headers["Api-Version"] == str(ApiVersion.API_LATEST_VERSION) - - # Valid signature, but bad body - def _before_send_hook(args): - signature = alice.signing_key.sign_only_signature(b"dummy") - args["headers"]["signature"] = b64encode(signature).decode("ascii") - - rep = await client.send( - PING_RAW_REQ, - before_send_hook=_before_send_hook, - check_rep=False, - ) - assert rep.status_code == 401 - assert rep.headers["Api-Version"] == str(ApiVersion.API_LATEST_VERSION) - - -async def _test_handshake_body_not_msgpack( - client: AuthenticatedRpcApiClient | AnonymousRpcApiClient | InvitedRpcApiClient, - alice: LocalDevice, -): - now = DateTime.now() - - def _before_send_hook(args): - bad_body = b"dummy" - args["data"] = bad_body - if isinstance(client, AuthenticatedRpcApiClient): - signature = alice.signing_key.sign_only_signature(bad_body) - args["headers"]["signature"] = b64encode(signature).decode("ascii") - - rep = await client.send( - PING_RAW_REQ, - before_send_hook=_before_send_hook, - check_rep=False, - now=now, - ) - assert rep.status_code == 415 - assert rep.headers["Api-Version"] == str( - ApiVersion.API_LATEST_VERSION - ) # This header must always be present !
- - -async def _test_handshake_body_msgpack_bad_unknown_cmd( - client: AuthenticatedRpcApiClient | AnonymousRpcApiClient | InvitedRpcApiClient, - alice: LocalDevice, -): - now = DateTime.now() - - def _before_send_hook(args): - bad_body = packb({"cmd": "dummy"}) - args["data"] = bad_body - if isinstance(client, AuthenticatedRpcApiClient): - signature = alice.signing_key.sign_only_signature(bad_body) - args["headers"]["signature"] = b64encode(signature).decode("ascii") - - rep = await client.send( - PING_RAW_REQ, - before_send_hook=_before_send_hook, - check_rep=False, - now=now, - ) - assert rep.status_code == 415 - assert rep.headers["Api-Version"] == str( - ApiVersion.API_LATEST_VERSION - ) # This header must always be present ! - - -async def _test_authenticated_handshake_author_not_found( - alice_http_client: AuthenticatedRpcApiClient, -): - rep = await alice_http_client.send( - PING_RAW_REQ, extra_headers={"Author": DeviceID("foo@bar")}, check_rep=False - ) - assert rep.status_code == 401 - assert rep.headers["Api-Version"] == str( - ApiVersion.API_LATEST_VERSION - ) # This header must always be present ! - - -async def _test_handshake_organization_expired( - client: AuthenticatedRpcApiClient | AnonymousRpcApiClient | InvitedRpcApiClient, -): - rep = await client.send(PING_RAW_REQ, check_rep=False) - assert rep.status_code == 460 - assert rep.headers["Api-Version"] == str( - ApiVersion.API_LATEST_VERSION - ) # This header must always be present ! - - -async def _test_authenticated_handshake_user_revoked( - alice_http_client: AuthenticatedRpcApiClient, -): - rep = await alice_http_client.send(PING_RAW_REQ, check_rep=False) - assert rep.status_code == 461 - assert rep.headers["Api-Version"] == str( - ApiVersion.API_LATEST_VERSION - ) # This header must always be present ! - - -async def _test_invited_handshake_invitation_token_not_found(client: InvitedRpcApiClient): - rep = await client.send(PING_RAW_REQ, check_rep=False, extra_headers={"Invitation-Token": None}) - assert rep.status_code == 415 - assert rep.headers["Api-Version"] == str( - ApiVersion.API_LATEST_VERSION - ) # This header must always be present ! - - -async def _test_invited_handshake_invitation_invalid_token(client): - rep = await client.send( - PING_RAW_REQ, check_rep=False, extra_headers={"Invitation-Token": "a" * 100} - ) - assert rep.status_code == 415 - assert rep.headers["Api-Version"] == str( - ApiVersion.API_LATEST_VERSION - ) # This header must always be present ! 
- - -@pytest.mark.trio -async def test_handshake( - alice_rpc: AuthenticatedRpcApiClient, - anonymous_rpc: AuthenticatedRpcApiClient, - invited_rpc: InvitedRpcApiClient, - alice: LocalDevice, - bob: LocalDevice, - backend: BackendApp, -): - # Merging all those tests into a single one saves plenty of time given - # we don't have to recreate the fixtures - - await _test_good_handshake(alice_rpc) - await _test_good_handshake(anonymous_rpc) - await _test_good_handshake(invited_rpc) - - await _test_handshake_bad_organization(alice_rpc) - await _test_handshake_bad_organization(anonymous_rpc) - await _test_handshake_bad_organization(invited_rpc) - - await _test_handshake_api_version_header(alice_rpc) - await _test_handshake_api_version_header(anonymous_rpc) - await _test_handshake_api_version_header(invited_rpc) - - await _test_handshake_content_type_header(alice_rpc) - await _test_handshake_content_type_header(anonymous_rpc) - await _test_handshake_content_type_header(invited_rpc) - - await _test_authenticated_handshake_bad_signature_header(alice_rpc, alice, bob) - - await _test_handshake_body_not_msgpack(alice_rpc, alice) - await _test_handshake_body_not_msgpack(anonymous_rpc, alice) - await _test_handshake_body_not_msgpack(invited_rpc, alice) - - await _test_handshake_body_msgpack_bad_unknown_cmd(alice_rpc, alice) - await _test_handshake_body_msgpack_bad_unknown_cmd(anonymous_rpc, alice) - await _test_handshake_body_msgpack_bad_unknown_cmd(invited_rpc, alice) - - await _test_authenticated_handshake_author_not_found(alice_rpc) - - await backend.organization.update(id=alice.organization_id, is_expired=True) - await _test_handshake_organization_expired(alice_rpc) - await _test_handshake_organization_expired(anonymous_rpc) - await _test_handshake_organization_expired(invited_rpc) - await backend.organization.update(id=alice.organization_id, is_expired=False) - - await backend.user.revoke_user( - organization_id=alice.organization_id, - user_id=alice.user_id, - revoked_user_certificate=b"dummy", - revoked_user_certifier=bob.device_id, - ) - await _test_authenticated_handshake_user_revoked(alice_rpc) - - await _test_invited_handshake_invitation_token_not_found(invited_rpc) - await _test_invited_handshake_invitation_invalid_token(invited_rpc) - - -@pytest.mark.trio -async def test_client_version_in_logs( - alice_rpc: AuthenticatedRpcApiClient, - anonymous_rpc: AnonymousRpcApiClient, - invited_rpc: InvitedRpcApiClient, - caplog, -): - client_api_version = ApiVersion( - ApiVersion.API_LATEST_VERSION.version, ApiVersion.API_LATEST_VERSION.revision + 1 - ) - alice_rpc.API_VERSION = client_api_version - anonymous_rpc.API_VERSION = client_api_version - invited_rpc.API_VERSION = client_api_version - with caplog.at_level(logging.INFO): - # Authenticated - await _test_good_handshake(alice_rpc) - assert ( - f"Authenticated client successfully connected (client/server API version: {client_api_version}/{ApiVersion.API_LATEST_VERSION})" - in caplog.text - ) - - # Anonymous - await _test_good_handshake(anonymous_rpc) - assert ( - f"Anonymous client successfully connected (client/server API version: {client_api_version}/{ApiVersion.API_LATEST_VERSION})" - in caplog.text - ) - - # Invited - await _test_good_handshake(invited_rpc) - assert ( - f"Invited client successfully connected (client/server API version: {client_api_version}/{ApiVersion.API_LATEST_VERSION})" - in caplog.text - ) diff --git a/server/tests/backend/test_server_stats.py b/server/tests/backend/test_server_stats.py deleted file mode 100644 index 
aee7568d0c2..00000000000 --- a/server/tests/backend/test_server_stats.py +++ /dev/null @@ -1,335 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS - -from typing import Optional - -import pytest - -from parsec._parsec import BlockID, DateTime, RealmRole, VlobID -from parsec.api.protocol.types import UserProfile -from parsec.backend.app import BackendApp -from parsec.backend.realm import RealmGrantedRole -from tests.common import customize_fixtures - - -async def server_stats( - client, - headers, - at: Optional[str] = None, - format: str = "json", -): - query_string = {"format": format} - if at: - query_string["at"] = at - rep = await client.get( - f"/administration/stats", - headers=headers, - query_string=query_string, - ) - assert rep.status_code == 200 - - return await rep.get_data(as_text=True) if format == "csv" else await rep.get_json() - - -@pytest.mark.trio -@customize_fixtures(backend_not_populated=True) -async def test_unauthorized_client(backend_asgi_app): - client = backend_asgi_app.test_client() # This client has no token - rep = await client.get("/administration/stats") - assert rep.status == "403 FORBIDDEN" - - -@pytest.mark.trio -@customize_fixtures(backend_not_populated=True) -async def test_bad_requests(backend_asgi_app): - client = backend_asgi_app.test_client() # Token is provided via the `Authorization` header below - headers = {"Authorization": f"Bearer {backend_asgi_app.backend.config.administration_token}"} - - async def _do_req(query_string: dict) -> dict: - rep = await client.get("/administration/stats", headers=headers, query_string=query_string) - assert rep.status == "400 BAD REQUEST" - return await rep.get_json() - - # No arguments in query string - assert await _do_req({}) == { - "error": "bad_data", - "reason": "Missing/invalid mandatory query argument `format` (expected `csv` or `json`)", - } - - # Missing `format` argument - assert await _do_req({"to": "2021-01-01T01:01:01Z"}) == { - "error": "bad_data", - "reason": "Missing/invalid mandatory query argument `format` (expected `csv` or `json`)", - } - - # Bad `format` argument - assert await _do_req({"format": "mp3"}) == { - "error": "bad_data", - "reason": "Missing/invalid mandatory query argument `format` (expected `csv` or `json`)", - } - - # Bad `at` argument - assert await _do_req({"format": "json", "at": "dummy"}) == { - "error": "bad_data", - "reason": "Invalid `at` query argument (expected RFC3339 datetime)", - }
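Outside the test suite, the endpoint exercised above is a plain authenticated HTTP GET, so any client works. An illustrative call (base URL and token are placeholders; requests picked for brevity):

import requests

BASE_URL = "https://parsec.example.com"  # placeholder
TOKEN = "s3cr3t"  # placeholder administration token

rep = requests.get(
    f"{BASE_URL}/administration/stats",
    headers={"Authorization": f"Bearer {TOKEN}"},
    # `format` is mandatory (`csv` or `json`); `at` optionally pins the stats
    # to an RFC 3339 datetime, bound included.
    params={"format": "json", "at": "2021-01-01T01:01:01Z"},
)
rep.raise_for_status()
for org in rep.json()["stats"]:
    print(org["organization_id"], org["active_users"])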
"users_per_profile_detail": [ - {"active": 0, "revoked": 0, "profile": "ADMIN"}, - {"active": 0, "revoked": 0, "profile": "STANDARD"}, - {"active": 0, "revoked": 0, "profile": "OUTSIDER"}, - ], - } - for org in ["Org1", "Org2"] - ] - } - - # Bootstrap & populate two organizations on February and March - for i in range(2, 4): - dtx1 = DateTime(2000, i, 1) - org = orgs[i - 2] - org_d1 = local_device_factory(org=org, profile=UserProfile.ADMIN) - await backend_data_binder.bind_organization( - org=org, - first_device=org_d1, - initial_user_manifest="not_synced", - timestamp=dtx1, - create_needed=False, - ) - - dtx2 = DateTime(2000, i, 2) - org_d2 = local_device_factory(org=org, profile=UserProfile.STANDARD) - org_d3 = local_device_factory(org=org, profile=UserProfile.OUTSIDER) - await backend_data_binder.bind_device( - device=org_d2, initial_user_manifest="not_synced", timestamp=dtx2 - ) - await backend_data_binder.bind_device( - device=org_d3, initial_user_manifest="not_synced", timestamp=dtx2 - ) - - dtx3 = DateTime(2000, i, 3) - dtx4 = DateTime(2000, i, 4) - for _ in range(2): - realm_id = VlobID.new() - await backend.realm.create( - organization_id=org.organization_id, - self_granted_role=RealmGrantedRole( - realm_id=realm_id, - user_id=org_d1.user_id, - certificate=b"", - role=RealmRole.OWNER, - granted_by=org_d1.device_id, - granted_on=dtx3, - ), - ) - vlob_id = VlobID.new() - await backend.vlob.create( - organization_id=org.organization_id, - author=org_d1.device_id, - encryption_revision=1, - vlob_id=vlob_id, - realm_id=realm_id, - timestamp=dtx4, - blob=b"\x00" * 10, - ) - await backend.block.create( - organization_id=org.organization_id, - author=org_d1.device_id, - block_id=BlockID.new(), - realm_id=realm_id, - created_on=dtx4, - block=b"\x00" * 100, - ) - - # Now check the stats - dty0_stats = await _get_stats() - assert dty0_stats == { - "stats": [ - { - "organization_id": org, - "metadata_size": 20, - "data_size": 200, - "realms": 2, - "active_users": 3, - "users_per_profile_detail": [ - {"active": 1, "revoked": 0, "profile": "ADMIN"}, - {"active": 1, "revoked": 0, "profile": "STANDARD"}, - {"active": 1, "revoked": 0, "profile": "OUTSIDER"}, - ], - } - for org in ["Org1", "Org2"] - ] - } - - # Update the 2nd organization - dty1 = DateTime(2001, 1, 1) - await backend.user.revoke_user( - organization_id=org.organization_id, - user_id=org_d2.user_id, - revoked_user_certificate=b"", - revoked_user_certifier=org_d1.device_id, - revoked_on=dty1, - ) - await backend.user.revoke_user( - organization_id=org.organization_id, - user_id=org_d3.user_id, - revoked_user_certificate=b"", - revoked_user_certifier=org_d1.device_id, - revoked_on=dty1, - ) - await backend.realm.create( - organization_id=org.organization_id, - self_granted_role=RealmGrantedRole( - realm_id=VlobID.new(), - user_id=org_d1.user_id, - certificate=b"", - role=RealmRole.OWNER, - granted_by=org_d1.device_id, - granted_on=dty1, - ), - ) - await backend.vlob.update( - organization_id=org.organization_id, - author=org_d1.device_id, - encryption_revision=1, - vlob_id=vlob_id, - version=2, - timestamp=dty1, - blob=b"\x00" * 10, - ) - await backend.block.create( - organization_id=org.organization_id, - author=org_d1.device_id, - block_id=BlockID.new(), - realm_id=realm_id, - created_on=dty1, - block=b"\x00" * 100, - ) - - # Stats should have changed - dty1_stats = await _get_stats() - assert dty1_stats == { - "stats": [ - { - "organization_id": "Org1", - "metadata_size": 20, - "data_size": 200, - "realms": 2, - "active_users": 3, 
- "users_per_profile_detail": [ - {"active": 1, "revoked": 0, "profile": "ADMIN"}, - {"active": 1, "revoked": 0, "profile": "STANDARD"}, - {"active": 1, "revoked": 0, "profile": "OUTSIDER"}, - ], - }, - { - "organization_id": "Org2", - "metadata_size": 30, - "data_size": 300, - "realms": 3, - "active_users": 1, - "users_per_profile_detail": [ - {"active": 1, "revoked": 0, "profile": "ADMIN"}, - {"active": 0, "revoked": 1, "profile": "STANDARD"}, - {"active": 0, "revoked": 1, "profile": "OUTSIDER"}, - ], - }, - ] - } - - # Check `at` option, note the bound is included - # assert await _get_stats(at=dty1.to_rfc3339()) == dty1_stats - # assert await _get_stats(at=dty1.add(microseconds=-1).to_rfc3339()) == dty0_stats - - # `at` filter don't excludes organization that are not bootstrapped at that time - assert await _get_stats(at="2000-02-01T00:00:00Z") == { - "stats": [ - # At that time, Org1 was already bootstrapped - { - "organization_id": "Org1", - "metadata_size": 0, - "data_size": 0, - "realms": 0, - "active_users": 1, - "users_per_profile_detail": [ - {"active": 1, "revoked": 0, "profile": "ADMIN"}, - {"active": 0, "revoked": 0, "profile": "STANDARD"}, - {"active": 0, "revoked": 0, "profile": "OUTSIDER"}, - ], - }, - # At that time, Org2 was created but not yet bootstrapped - { - "organization_id": "Org2", - "metadata_size": 0, - "data_size": 0, - "realms": 0, - "active_users": 0, - "users_per_profile_detail": [ - {"active": 0, "revoked": 0, "profile": "ADMIN"}, - {"active": 0, "revoked": 0, "profile": "STANDARD"}, - {"active": 0, "revoked": 0, "profile": "OUTSIDER"}, - ], - }, - ] - } - # At that time, the server was empty - assert await _get_stats(at="1999-01-01T00:00:00Z") == {"stats": []} - - # Finally test the csv format - rep = await client.get( - "/administration/stats", - headers=headers, - query_string={"format": "csv"}, - ) - assert rep.status_code == 200 - assert rep.headers["content-type"] == "text/csv" - csv_data = await rep.get_data() - # We use Excel like CSV that use carriage return and line feed ('\r\n') as line separator - assert ( - csv_data - == b"""organization_id,data_size,metadata_size,realms,active_users,admin_users_active,admin_users_revoked,standard_users_active,standard_users_revoked,outsider_users_active,outsider_users_revoked\r -Org1,200,20,2,3,1,0,1,0,1,0\r -Org2,300,30,3,1,1,0,0,1,0,1\r -""" - ) diff --git a/server/tests/backend/test_swift_blockstore.py b/server/tests/backend/test_swift_blockstore.py deleted file mode 100644 index 32f0b1817bf..00000000000 --- a/server/tests/backend/test_swift_blockstore.py +++ /dev/null @@ -1,91 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -from unittest import mock -from unittest.mock import Mock - -import pytest -from swiftclient.exceptions import ClientException - -from parsec.api.protocol import BlockID, OrganizationID -from parsec.backend.block import BlockStoreError -from parsec.backend.swift_blockstore import SwiftBlockStoreComponent - - -@pytest.mark.trio -async def test_swift_get(caplog): - org_id = OrganizationID("org42") - block_id = BlockID.from_hex("0694a21176354e8295e28a543e5887f9") - - def _assert_log(): - log = caplog.assert_occurred_once("[warning ] Block read error") - assert f"organization_id={org_id.str}" in log - assert f"block_id={block_id.hex}" in log - assert len(caplog.messages) == 1 - caplog.clear() - - with mock.patch("swiftclient.Connection") as connection_mock: - connection_mock.return_value = Mock() - 
connection_mock().head_container.return_value = True - blockstore = SwiftBlockStoreComponent("http://url", "scille", "parsec", "john", "secret") - - # Ok - connection_mock().get_object.return_value = True, "content" - assert await blockstore.read(org_id, block_id) == "content" - connection_mock().get_object.assert_called_once_with( - "parsec", "org42/0694a211-7635-4e82-95e2-8a543e5887f9" - ) - connection_mock().get_object.reset_mock() - assert not caplog.messages - - # Not found - connection_mock().get_object.side_effect = ClientException(http_status=404, msg="") - with pytest.raises(BlockStoreError): - assert await blockstore.read(org_id, block_id) - _assert_log() - - # Other exception - connection_mock().get_object.side_effect = ClientException(http_status=500, msg="") - with pytest.raises(BlockStoreError): - assert await blockstore.read(org_id, block_id) - _assert_log() - - -@pytest.mark.trio -async def test_swift_create(caplog): - org_id = OrganizationID("org42") - block_id = BlockID.from_hex("0694a21176354e8295e28a543e5887f9") - - def _assert_log(): - log = caplog.assert_occurred_once("[warning ] Block create error") - assert f"organization_id={org_id.str}" in log - assert f"block_id={block_id.hex}" in log - assert len(caplog.messages) == 1 - caplog.clear() - - with mock.patch("swiftclient.Connection") as connection_mock: - connection_mock.return_value = Mock() - connection_mock().head_container.return_value = True - blockstore = SwiftBlockStoreComponent("http://url", "scille", "parsec", "john", "secret") - - # Ok - connection_mock().get_object.side_effect = ClientException(http_status=404, msg="") - await blockstore.create(org_id, block_id, "content") - connection_mock().put_object.assert_called_with( - "parsec", "org42/0694a211-7635-4e82-95e2-8a543e5887f9", "content" - ) - connection_mock().put_object.reset_mock() - assert not caplog.messages - - # Connection error at PUT - connection_mock().get_object.side_effect = ClientException(msg="Connection error") - connection_mock().put_object.side_effect = ClientException(msg="Connection error") - with pytest.raises(BlockStoreError): - await blockstore.create(org_id, block_id, "content") - _assert_log() - - # Unknown exception at PUT - connection_mock().put_object.side_effect = ClientException(http_status=500, msg="") - with pytest.raises(BlockStoreError): - await blockstore.create(org_id, block_id, "content") - _assert_log() diff --git a/server/tests/backend/test_ws_handshake.py b/server/tests/backend/test_ws_handshake.py deleted file mode 100644 index 63e4df19bc7..00000000000 --- a/server/tests/backend/test_ws_handshake.py +++ /dev/null @@ -1,317 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -import logging -from unittest.mock import ANY - -import pytest - -from parsec._parsec import ApiVersion, BackendEventOrganizationExpired -from parsec.api.protocol import ( - AuthenticatedClientHandshake, - HandshakeBadIdentity, - HandshakeOrganizationExpired, - HandshakeRVKMismatch, - InvitationToken, - InvitationType, - InvitedClientHandshake, - packb, - unpackb, -) -from parsec.api.protocol.handshake import ServerHandshake - - -@pytest.mark.trio -@pytest.mark.parametrize( - "kind", - ["bad_handshake_type", "irrelevant_dict", "valid_msgpack_but_not_a_dict", "invalid_msgpack"], -) -async def test_handshake_send_invalid_answer_data(backend_asgi_app, kind): - if kind == "bad_handshake_type": - bad_req = packb( - { - "handshake": "dummy", - "client_api_version": ( - 
ApiVersion.API_LATEST_VERSION.version, - ApiVersion.API_LATEST_VERSION.revision, - ), - } - ) - elif kind == "irrelevant_dict": - bad_req = packb({"foo": "bar"}) - elif kind == "valid_msgpack_but_not_a_dict": - bad_req = b"\x00" # Encodes the number 0 as positive fix int - else: - assert kind == "invalid_msgpack" - bad_req = b"\xc1" # Never used value according to msgpack spec - - client = backend_asgi_app.test_client() - async with client.websocket("/ws") as ws: - await ws.receive() # Get challenge - await ws.send(bad_req) - result_req = await ws.receive() - assert unpackb(result_req) == {"handshake": "result", "result": "bad_protocol", "help": ANY} - - -@pytest.mark.trio -async def test_handshake_incompatible_version(backend_asgi_app): - client = backend_asgi_app.test_client() - async with client.websocket("/ws") as ws: - incompatible_version = ApiVersion(ApiVersion.API_LATEST_VERSION.version + 1, 0) - await ws.receive() # Get challenge - req = { - "handshake": "answer", - "type": "anonymous", - "client_api_version": (incompatible_version.version, incompatible_version.revision), - "organization_id": "Org", - "token": "whatever", - } - await ws.send(packb(req)) - result_req = await ws.receive() - assert unpackb(result_req) == { - "handshake": "result", - "result": "bad_protocol", - "help": f"No overlap between client API versions {{{incompatible_version}}} and backend API versions {{{', '.join(map(str, ServerHandshake.SUPPORTED_API_VERSIONS))}}}", - } - - -@pytest.mark.trio -async def test_authenticated_handshake_good(backend_asgi_app, alice): - ch = AuthenticatedClientHandshake( - organization_id=alice.organization_id, - device_id=alice.device_id, - user_signkey=alice.signing_key, - root_verify_key=alice.root_verify_key, - ) - - client = backend_asgi_app.test_client() - async with client.websocket("/ws") as ws: - challenge_req = await ws.receive() - answer_req = ch.process_challenge_req(challenge_req) - - await ws.send(answer_req) - result_req = await ws.receive() - ch.process_result_req(result_req) - - assert ch.client_api_version == ApiVersion.API_V3_VERSION - assert ch.backend_api_version == ApiVersion.API_V3_VERSION - - -@pytest.mark.trio -async def test_authenticated_handshake_bad_rvk(backend_asgi_app, alice, other_org): - ch = AuthenticatedClientHandshake( - organization_id=alice.organization_id, - device_id=alice.device_id, - user_signkey=alice.signing_key, - root_verify_key=other_org.root_verify_key, - ) - client = backend_asgi_app.test_client() - async with client.websocket("/ws") as ws: - challenge_req = await ws.receive() - answer_req = ch.process_challenge_req(challenge_req) - - await ws.send(answer_req) - result_req = await ws.receive() - with pytest.raises(HandshakeRVKMismatch): - ch.process_result_req(result_req) - - -@pytest.mark.trio -@pytest.mark.parametrize("invitation_type", (InvitationType.USER, InvitationType.DEVICE)) -async def test_invited_handshake_good(backend_asgi_app, backend, alice, invitation_type): - if invitation_type == InvitationType.USER: - invitation = await backend.invite.new_for_user( - organization_id=alice.organization_id, - greeter_user_id=alice.user_id, - claimer_email="zack@example.com", - ) - else: # Claim device - invitation = await backend.invite.new_for_device( - organization_id=alice.organization_id, greeter_user_id=alice.user_id - ) - - ch = InvitedClientHandshake( - organization_id=alice.organization_id, - invitation_type=invitation_type, - token=invitation.token, - ) - client = backend_asgi_app.test_client() - async with 
client.websocket("/ws") as ws: - challenge_req = await ws.receive() - answer_req = ch.process_challenge_req(challenge_req) - - await ws.send(answer_req) - result_req = await ws.receive() - ch.process_result_req(result_req) - - assert ch.client_api_version == ApiVersion.API_V3_VERSION - assert ch.backend_api_version == ApiVersion.API_V3_VERSION - - -@pytest.mark.trio -async def test_api_version_in_logs_on_handshake(backend_asgi_app, backend, alice, caplog): - invitation = await backend.invite.new_for_user( - organization_id=alice.organization_id, - greeter_user_id=alice.user_id, - claimer_email="zack@example.com", - ) - - ch = InvitedClientHandshake( - organization_id=alice.organization_id, - invitation_type=InvitationType.USER, - token=invitation.token, - ) - client_api_version = ApiVersion(3, 99) - ch.SUPPORTED_API_VERSIONS = [client_api_version] - client = backend_asgi_app.test_client() - with caplog.at_level(logging.INFO): - async with client.websocket("/ws") as ws: - challenge_req = await ws.receive() - answer_req = ch.process_challenge_req(challenge_req) - - await ws.send(answer_req) - result_req = await ws.receive() - ch.process_result_req(result_req) - - # Sanity checks - assert ch.client_api_version == client_api_version - assert ch.backend_api_version == ApiVersion.API_V3_VERSION - - assert ( - f"(client/server API version: {client_api_version}/{ApiVersion.API_V3_VERSION})" - in caplog.text - ) - - -@pytest.mark.trio -@pytest.mark.parametrize("invitation_type", (InvitationType.USER, InvitationType.DEVICE)) -async def test_invited_handshake_bad_token(backend_asgi_app, coolorg, invitation_type): - ch = InvitedClientHandshake( - organization_id=coolorg.organization_id, - invitation_type=invitation_type, - token=InvitationToken.new(), - ) - client = backend_asgi_app.test_client() - async with client.websocket("/ws") as ws: - challenge_req = await ws.receive() - answer_req = ch.process_challenge_req(challenge_req) - - await ws.send(answer_req) - result_req = await ws.receive() - with pytest.raises(HandshakeBadIdentity): - ch.process_result_req(result_req) - - -@pytest.mark.trio -async def test_invited_handshake_bad_token_type(backend_asgi_app, backend, alice): - invitation = await backend.invite.new_for_device( - organization_id=alice.organization_id, greeter_user_id=alice.user_id - ) - - ch = InvitedClientHandshake( - organization_id=alice.organization_id, - invitation_type=InvitationType.USER, - token=invitation.token, - ) - client = backend_asgi_app.test_client() - async with client.websocket("/ws") as ws: - challenge_req = await ws.receive() - answer_req = ch.process_challenge_req(challenge_req) - - await ws.send(answer_req) - result_req = await ws.receive() - with pytest.raises(HandshakeBadIdentity): - ch.process_result_req(result_req) - - -@pytest.mark.trio -@pytest.mark.parametrize("type", ["invited", "authenticated"]) -async def test_handshake_unknown_organization(backend_asgi_app, organization_factory, alice, type): - bad_org = organization_factory() - if type == "invited": - ch = InvitedClientHandshake( - organization_id=bad_org.organization_id, - invitation_type=InvitationType.USER, - token=InvitationToken.new(), - ) - else: # authenticated - ch = AuthenticatedClientHandshake( - organization_id=bad_org.organization_id, - device_id=alice.device_id, - user_signkey=alice.signing_key, - root_verify_key=bad_org.root_verify_key, - ) - - client = backend_asgi_app.test_client() - async with client.websocket("/ws") as ws: - challenge_req = await ws.receive() - answer_req = 
ch.process_challenge_req(challenge_req) - - await ws.send(answer_req) - result_req = await ws.receive() - with pytest.raises(HandshakeBadIdentity): - ch.process_result_req(result_req) - - -@pytest.mark.trio -@pytest.mark.parametrize("type", ["invited", "authenticated"]) -async def test_handshake_expired_organization(backend_asgi_app, backend, expiredorg, alice, type): - if type == "invited": - ch = InvitedClientHandshake( - organization_id=expiredorg.organization_id, - invitation_type=InvitationType.USER, - token=InvitationToken.new(), - ) - else: # authenticated - ch = AuthenticatedClientHandshake( - organization_id=expiredorg.organization_id, - device_id=alice.device_id, - user_signkey=alice.signing_key, - root_verify_key=expiredorg.root_verify_key, - ) - - with backend.event_bus.listen() as spy: - client = backend_asgi_app.test_client() - async with client.websocket("/ws") as ws: - challenge_req = await ws.receive() - answer_req = ch.process_challenge_req(challenge_req) - - await ws.send(answer_req) - result_req = await ws.receive() - with pytest.raises(HandshakeOrganizationExpired): - ch.process_result_req(result_req) - await spy.wait_with_timeout(BackendEventOrganizationExpired) - - -@pytest.mark.trio -async def test_authenticated_handshake_unknown_device(backend_asgi_app, mallory): - ch = AuthenticatedClientHandshake( - organization_id=mallory.organization_id, - device_id=mallory.device_id, - user_signkey=mallory.signing_key, - root_verify_key=mallory.root_verify_key, - ) - client = backend_asgi_app.test_client() - async with client.websocket("/ws") as ws: - challenge_req = await ws.receive() - answer_req = ch.process_challenge_req(challenge_req) - - await ws.send(answer_req) - result_req = await ws.receive() - with pytest.raises(HandshakeBadIdentity): - ch.process_result_req(result_req) - - -@pytest.mark.trio -async def test_handshake_string_websocket_message(backend_asgi_app, mallory): - client = backend_asgi_app.test_client() - async with client.websocket("/ws") as ws: - await ws.receive() # Get the challenge - await ws.send("hello") - - result_req = await ws.receive() - assert unpackb(result_req) == { - "result": "bad_protocol", - "handshake": "result", - "help": "Expected bytes message in websocket", - } diff --git a/server/tests/backend/user/__init__.py b/server/tests/backend/user/__init__.py deleted file mode 100644 index 05e02a3b569..00000000000 --- a/server/tests/backend/user/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS diff --git a/server/tests/backend/user/test_device_create.py b/server/tests/backend/user/test_device_create.py deleted file mode 100644 index 6d7dd98bdc7..00000000000 --- a/server/tests/backend/user/test_device_create.py +++ /dev/null @@ -1,257 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -import pytest - -from parsec._parsec import ( - DateTime, - UserProfile, -) -from parsec.api.data import DeviceCertificate -from parsec.api.protocol import ( - AuthenticatedPingRepOk, - DeviceCreateRepAlreadyExists, - DeviceCreateRepBadUserId, - DeviceCreateRepInvalidCertification, - DeviceCreateRepInvalidData, - DeviceCreateRepOk, -) -from parsec.backend.user import INVITATION_VALIDITY, Device -from tests.backend.common import authenticated_ping, device_create -from tests.common import customize_fixtures, freeze_time - - -@pytest.fixture -def alice_nd(local_device_factory, alice): - return 
local_device_factory(f"{alice.user_id.str}@new_device") - - -@pytest.mark.trio -@customize_fixtures( - alice_profile=UserProfile.OUTSIDER -) # Any profile is be allowed to create new devices -async def test_device_create_ok( - backend_asgi_app, backend_authenticated_ws_factory, alice_ws, alice, alice_nd -): - now = DateTime.now() - device_certificate = DeviceCertificate( - author=alice.device_id, - timestamp=now, - device_id=alice_nd.device_id, - device_label=alice_nd.device_label, - verify_key=alice_nd.verify_key, - ) - redacted_device_certificate = device_certificate.evolve(device_label=None) - device_certificate = device_certificate.dump_and_sign(alice.signing_key) - redacted_device_certificate = redacted_device_certificate.dump_and_sign(alice.signing_key) - - with backend_asgi_app.backend.event_bus.listen(): - rep = await device_create( - alice_ws, - device_certificate=device_certificate, - redacted_device_certificate=redacted_device_certificate, - ) - assert isinstance(rep, DeviceCreateRepOk) - - # Make sure the new device can connect now - async with backend_authenticated_ws_factory(backend_asgi_app, alice_nd) as sock: - rep = await authenticated_ping(sock, ping="Hello world !") - assert rep == AuthenticatedPingRepOk(pong="Hello world !") - - # Check the resulting data in the backend - _, backend_device = await backend_asgi_app.backend.user.get_user_with_device( - alice_nd.organization_id, alice_nd.device_id - ) - assert backend_device == Device( - device_id=alice_nd.device_id, - device_label=alice_nd.device_label, - device_certificate=device_certificate, - redacted_device_certificate=redacted_device_certificate, - device_certifier=alice.device_id, - created_on=now, - ) - - -@pytest.mark.trio -async def test_device_create_invalid_certified(alice_ws, alice, bob, alice_nd): - now = DateTime.now() - good_device_certificate = DeviceCertificate( - author=alice.device_id, - timestamp=now, - device_id=alice_nd.device_id, - device_label=alice_nd.device_label, - verify_key=alice_nd.verify_key, - ).dump_and_sign(alice.signing_key) - bad_device_certificate = DeviceCertificate( - author=bob.device_id, - timestamp=now, - device_id=alice_nd.device_id, - device_label=alice_nd.device_label, - verify_key=alice_nd.verify_key, - ).dump_and_sign(bob.signing_key) - - rep = await device_create( - alice_ws, - device_certificate=bad_device_certificate, - redacted_device_certificate=good_device_certificate, - ) - assert isinstance(rep, DeviceCreateRepInvalidCertification) - - # Same for the redacted part - - rep = await device_create( - alice_ws, - device_certificate=good_device_certificate, - redacted_device_certificate=bad_device_certificate, - ) - assert isinstance(rep, DeviceCreateRepInvalidCertification) - - -@pytest.mark.trio -async def test_user_redacted_non_redacted_mixed_up( - alice_ws, - alice, - alice_nd, -): - now = DateTime.now() - device_certificate = DeviceCertificate( - author=alice.device_id, - timestamp=now, - device_id=alice_nd.device_id, - device_label=alice_nd.device_label, - verify_key=alice_nd.verify_key, - ) - redacted_device_certificate = device_certificate.evolve(device_label=None) - - device_certificate = device_certificate.dump_and_sign(alice.signing_key) - redacted_device_certificate = redacted_device_certificate.dump_and_sign(alice.signing_key) - - kwargs = { - "device_certificate": device_certificate, - "redacted_device_certificate": redacted_device_certificate, - } - for field, value in [ - ("device_certificate", redacted_device_certificate), - ("redacted_device_certificate", 
device_certificate), - ]: - rep = await device_create(alice_ws, **{**kwargs, field: value}) - assert isinstance(rep, DeviceCreateRepInvalidData) - - -@pytest.mark.trio -async def test_device_create_already_exists(alice_ws, alice, alice2): - now = DateTime.now() - device_certificate = DeviceCertificate( - author=alice.device_id, - timestamp=now, - device_id=alice2.device_id, - device_label=alice2.device_label, - verify_key=alice2.verify_key, - ) - redacted_device_certificate = device_certificate.evolve(device_label=None) - redacted_device_certificate = redacted_device_certificate.dump_and_sign(alice.signing_key) - device_certificate = device_certificate.dump_and_sign(alice.signing_key) - - rep = await device_create( - alice_ws, - device_certificate=device_certificate, - redacted_device_certificate=redacted_device_certificate, - ) - - assert isinstance(rep, DeviceCreateRepAlreadyExists) - - -@pytest.mark.trio -async def test_device_create_not_own_user(bob_ws, bob, alice_nd): - now = DateTime.now() - device_certificate = DeviceCertificate( - author=bob.device_id, - timestamp=now, - device_id=alice_nd.device_id, - device_label=None, - verify_key=alice_nd.verify_key, - ).dump_and_sign(bob.signing_key) - - rep = await device_create( - bob_ws, - device_certificate=device_certificate, - redacted_device_certificate=device_certificate, - ) - assert isinstance(rep, DeviceCreateRepBadUserId) - - -@pytest.mark.trio -async def test_device_create_certify_too_old(alice_ws, alice, alice_nd): - now = DateTime(2000, 1, 1) - device_certificate = DeviceCertificate( - author=alice.device_id, - timestamp=now, - device_id=alice_nd.device_id, - device_label=None, - verify_key=alice_nd.verify_key, - ).dump_and_sign(alice.signing_key) - - with freeze_time(now.add(seconds=INVITATION_VALIDITY + 1)): - rep = await device_create( - alice_ws, - device_certificate=device_certificate, - redacted_device_certificate=device_certificate, - ) - assert isinstance(rep, DeviceCreateRepInvalidCertification) - - -@pytest.mark.trio -async def test_device_create_bad_redacted_device_certificate(alice_ws, alice, alice_nd): - now = DateTime.now() - device_certificate = DeviceCertificate( - author=alice.device_id, - timestamp=now, - device_id=alice_nd.device_id, - device_label=alice_nd.device_label, - verify_key=alice_nd.verify_key, - ) - good_redacted_device_certificate = device_certificate.evolve(device_label=None) - device_certificate = device_certificate.dump_and_sign(alice.signing_key) - for bad_redacted_device_certificate in ( - good_redacted_device_certificate.evolve(timestamp=now.add(seconds=1)), - good_redacted_device_certificate.evolve(device_id=alice.device_id), - good_redacted_device_certificate.evolve(verify_key=alice.verify_key), - ): - rep = await device_create( - alice_ws, - device_certificate=device_certificate, - redacted_device_certificate=bad_redacted_device_certificate.dump_and_sign( - alice.signing_key - ), - ) - assert isinstance(rep, DeviceCreateRepInvalidData) - - # Finally just make sure good was really good - rep = await device_create( - alice_ws, - device_certificate=device_certificate, - redacted_device_certificate=good_redacted_device_certificate.dump_and_sign( - alice.signing_key - ), - ) - assert isinstance(rep, DeviceCreateRepOk) - - -@pytest.mark.trio -async def test_redacted_certificates_cannot_contain_sensitive_data(alice_ws, alice, alice_nd): - now = DateTime.now() - device_certificate = DeviceCertificate( - author=alice.device_id, - timestamp=now, - device_id=alice_nd.device_id, - 
device_label=alice_nd.device_label, - verify_key=alice_nd.verify_key, - ).dump_and_sign(alice.signing_key) - - with freeze_time(now): - rep = await device_create( - alice_ws, - device_certificate=device_certificate, - redacted_device_certificate=device_certificate, - ) - assert isinstance(rep, DeviceCreateRepInvalidData) diff --git a/server/tests/backend/user/test_user_create.py b/server/tests/backend/user/test_user_create.py deleted file mode 100644 index 85622f4818d..00000000000 --- a/server/tests/backend/user/test_user_create.py +++ /dev/null @@ -1,688 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -import pytest - -from parsec._parsec import ( - ActiveUsersLimit, - DateTime, - DeviceID, - DeviceLabel, - UserProfile, - authenticated_cmds, -) -from parsec.api.data import DeviceCertificate, UserCertificate -from parsec.api.protocol import ( - UserCreateRepActiveUsersLimitReached, - UserCreateRepAlreadyExists, - UserCreateRepInvalidCertification, - UserCreateRepInvalidData, - UserCreateRepNotAllowed, - UserCreateRepOk, -) -from parsec.backend.user import INVITATION_VALIDITY, Device, User -from tests.backend.common import authenticated_ping, user_create -from tests.common import customize_fixtures, freeze_time - - -@pytest.mark.trio -@pytest.mark.parametrize("profile", UserProfile.VALUES) -async def test_user_create_ok( - backend_asgi_app, - backend_authenticated_ws_factory, - alice_ws, - alice, - mallory, - profile, -): - now = DateTime.now() - user_certificate = UserCertificate( - author=alice.device_id, - timestamp=now, - user_id=mallory.user_id, - human_handle=mallory.human_handle, - public_key=mallory.public_key, - profile=profile, - ) - redacted_user_certificate = user_certificate.evolve(human_handle=None) - device_certificate = DeviceCertificate( - author=alice.device_id, - timestamp=now, - device_id=mallory.device_id, - device_label=mallory.device_label, - verify_key=mallory.verify_key, - ) - redacted_device_certificate = device_certificate.evolve(device_label=None) - - user_certificate = user_certificate.dump_and_sign(alice.signing_key) - device_certificate = device_certificate.dump_and_sign(alice.signing_key) - redacted_user_certificate = redacted_user_certificate.dump_and_sign(alice.signing_key) - redacted_device_certificate = redacted_device_certificate.dump_and_sign(alice.signing_key) - - rep = await user_create( - alice_ws, - user_certificate=user_certificate, - device_certificate=device_certificate, - redacted_user_certificate=redacted_user_certificate, - redacted_device_certificate=redacted_device_certificate, - ) - assert isinstance(rep, UserCreateRepOk) - - # Make sure mallory can connect now - async with backend_authenticated_ws_factory(backend_asgi_app, mallory) as sock: - rep = await authenticated_ping(sock, user_id=mallory.user_id) - assert isinstance(rep, authenticated_cmds.latest.ping.RepOk) - - # Check the resulting data in the backend - backend_user, backend_device = await backend_asgi_app.backend.user.get_user_with_device( - mallory.organization_id, mallory.device_id - ) - - assert backend_user == User( - user_id=mallory.user_id, - human_handle=mallory.human_handle, - initial_profile=profile, - user_certificate=user_certificate, - redacted_user_certificate=redacted_user_certificate, - user_certifier=alice.device_id, - created_on=now, - ) - assert backend_device == Device( - device_id=mallory.device_id, - device_label=mallory.device_label, - device_certificate=device_certificate, - 
redacted_device_certificate=redacted_device_certificate, - device_certifier=alice.device_id, - created_on=now, - ) - - -@pytest.mark.trio -@customize_fixtures(backend_not_populated=True) -async def test_user_create_nok_active_users_limit_reached( - backend_asgi_app, - backend_data_binder_factory, - backend_authenticated_ws_factory, - coolorg, - alice, - mallory, -): - # Ensure there is only one user in the organization... - binder = backend_data_binder_factory(backend_asgi_app.backend) - await binder.bind_organization(coolorg, alice) - # ...so our active user limit has just been reached - await backend_asgi_app.backend.organization.update( - alice.organization_id, active_users_limit=ActiveUsersLimit.LimitedTo(1) - ) - - now = DateTime.now() - user_certificate = UserCertificate( - author=alice.device_id, - timestamp=now, - user_id=mallory.user_id, - human_handle=mallory.human_handle, - public_key=mallory.public_key, - profile=UserProfile.STANDARD, - ) - redacted_user_certificate = user_certificate.evolve(human_handle=None) - device_certificate = DeviceCertificate( - author=alice.device_id, - timestamp=now, - device_id=mallory.device_id, - device_label=mallory.device_label, - verify_key=mallory.verify_key, - ) - redacted_device_certificate = device_certificate.evolve(device_label=None) - - user_certificate = user_certificate.dump_and_sign(alice.signing_key) - device_certificate = device_certificate.dump_and_sign(alice.signing_key) - redacted_user_certificate = redacted_user_certificate.dump_and_sign(alice.signing_key) - redacted_device_certificate = redacted_device_certificate.dump_and_sign(alice.signing_key) - - async with backend_authenticated_ws_factory(backend_asgi_app, alice) as sock: - rep = await user_create( - sock, - user_certificate=user_certificate, - device_certificate=device_certificate, - redacted_user_certificate=redacted_user_certificate, - redacted_device_certificate=redacted_device_certificate, - ) - assert isinstance(rep, UserCreateRepActiveUsersLimitReached) - - # Now correct the limit, and ensure the user can be created - - await backend_asgi_app.backend.organization.update( - alice.organization_id, active_users_limit=ActiveUsersLimit.LimitedTo(2) - ) - - rep = await user_create( - sock, - user_certificate=user_certificate, - device_certificate=device_certificate, - redacted_user_certificate=redacted_user_certificate, - redacted_device_certificate=redacted_device_certificate, - ) - assert isinstance(rep, UserCreateRepOk) - - -@pytest.mark.trio -async def test_user_create_invalid_certificate(alice_ws, alice, bob, mallory): - now = DateTime.now() - good_user_certificate = UserCertificate( - author=alice.device_id, - timestamp=now, - user_id=mallory.user_id, - human_handle=mallory.human_handle, - public_key=mallory.public_key, - profile=UserProfile.STANDARD, - ).dump_and_sign(alice.signing_key) - good_device_certificate = DeviceCertificate( - author=alice.device_id, - timestamp=now, - device_id=mallory.device_id, - device_label=mallory.device_label, - verify_key=mallory.verify_key, - ).dump_and_sign(alice.signing_key) - bad_user_certificate = UserCertificate( - author=bob.device_id, - timestamp=now, - user_id=mallory.user_id, - human_handle=mallory.human_handle, - public_key=mallory.public_key, - profile=UserProfile.STANDARD, - ).dump_and_sign(bob.signing_key) - bad_device_certificate = DeviceCertificate( - author=bob.device_id, - timestamp=now, - device_id=mallory.device_id, - device_label=mallory.device_label, - verify_key=mallory.verify_key, - 
).dump_and_sign(bob.signing_key) - - for cu, cd in [ - (good_user_certificate, bad_device_certificate), - (bad_user_certificate, good_device_certificate), - (bad_user_certificate, bad_device_certificate), - ]: - rep = await user_create( - alice_ws, - user_certificate=cu, - device_certificate=cd, - redacted_user_certificate=good_user_certificate, - redacted_device_certificate=good_device_certificate, - ) - assert isinstance(rep, UserCreateRepInvalidCertification) - - # Same thing for the redacted part - for cu, cd in [ - (good_user_certificate, bad_device_certificate), - (bad_user_certificate, good_device_certificate), - (bad_user_certificate, bad_device_certificate), - ]: - rep = await user_create( - alice_ws, - user_certificate=good_user_certificate, - device_certificate=good_device_certificate, - redacted_user_certificate=cu, - redacted_device_certificate=cd, - ) - assert isinstance(rep, UserCreateRepInvalidCertification) - - -@pytest.mark.trio -async def test_user_redacted_non_redacted_mixed_up( - alice_ws, - alice, - mallory, -): - now = DateTime.now() - user_certificate = UserCertificate( - author=alice.device_id, - timestamp=now, - user_id=mallory.user_id, - human_handle=mallory.human_handle, - public_key=mallory.public_key, - profile=UserProfile.STANDARD, - ) - redacted_user_certificate = user_certificate.evolve(human_handle=None) - device_certificate = DeviceCertificate( - author=alice.device_id, - timestamp=now, - device_id=mallory.device_id, - device_label=mallory.device_label, - verify_key=mallory.verify_key, - ) - redacted_device_certificate = device_certificate.evolve(device_label=None) - - user_certificate = user_certificate.dump_and_sign(alice.signing_key) - device_certificate = device_certificate.dump_and_sign(alice.signing_key) - redacted_user_certificate = redacted_user_certificate.dump_and_sign(alice.signing_key) - redacted_device_certificate = redacted_device_certificate.dump_and_sign(alice.signing_key) - - kwargs = { - "user_certificate": user_certificate, - "device_certificate": device_certificate, - "redacted_user_certificate": redacted_user_certificate, - "redacted_device_certificate": redacted_device_certificate, - } - for field, value in [ - ("user_certificate", redacted_user_certificate), - ("device_certificate", redacted_device_certificate), - ("redacted_user_certificate", user_certificate), - ("redacted_device_certificate", device_certificate), - ]: - rep = await user_create(alice_ws, **{**kwargs, field: value}) - assert isinstance(rep, UserCreateRepInvalidData) - - -@pytest.mark.trio -async def test_user_create_not_matching_user_device(alice_ws, alice, bob, mallory): - now = DateTime.now() - user_certificate = UserCertificate( - author=alice.device_id, - timestamp=now, - user_id=mallory.user_id, - human_handle=mallory.human_handle, - public_key=mallory.public_key, - profile=UserProfile.STANDARD, - ).dump_and_sign(alice.signing_key) - device_certificate = DeviceCertificate( - author=alice.device_id, - timestamp=now, - device_id=bob.device_id, - device_label=mallory.device_label, - verify_key=mallory.verify_key, - ).dump_and_sign(alice.signing_key) - - rep = await user_create( - alice_ws, - user_certificate=user_certificate, - device_certificate=device_certificate, - redacted_user_certificate=user_certificate, - redacted_device_certificate=device_certificate, - ) - assert isinstance(rep, UserCreateRepInvalidData) - - -@pytest.mark.trio -async def test_user_create_bad_redacted_device_certificate(alice_ws, alice, mallory): - now = DateTime.now() - user_certificate 
= UserCertificate( - author=alice.device_id, - timestamp=now, - user_id=mallory.user_id, - human_handle=mallory.human_handle, - public_key=mallory.public_key, - profile=UserProfile.STANDARD, - ) - redacted_user_certificate = user_certificate.evolve(human_handle=None) - redacted_user_certificate = redacted_user_certificate.dump_and_sign(alice.signing_key) - user_certificate = user_certificate.dump_and_sign(alice.signing_key) - device_certificate = DeviceCertificate( - author=alice.device_id, - timestamp=now, - device_id=mallory.device_id, - device_label=mallory.device_label, - verify_key=mallory.verify_key, - ) - good_redacted_device_certificate = device_certificate.evolve(device_label=None) - device_certificate = device_certificate.dump_and_sign(alice.signing_key) - for bad_redacted_device_certificate in ( - good_redacted_device_certificate.evolve(timestamp=now.add(seconds=1)), - good_redacted_device_certificate.evolve(device_id=alice.device_id), - good_redacted_device_certificate.evolve(verify_key=alice.verify_key), - ): - rep = await user_create( - alice_ws, - user_certificate=user_certificate, - device_certificate=device_certificate, - redacted_user_certificate=redacted_user_certificate, - redacted_device_certificate=bad_redacted_device_certificate.dump_and_sign( - alice.signing_key - ), - ) - assert isinstance(rep, UserCreateRepInvalidData) - - # Missing redacted certificate is not allowed as well - # We should not be able to build an invalid request - # Generated from Python implementation (Parsec v2.11.1+dev) - # Content: - # cmd: "user_create" - # device_certificate: hex!("666f6f626172") - # redacted_device_certificate: None - # redacted_user_certificate: hex!("666f6f626172") - # user_certificate: hex!("666f6f626172") - # - raw_req = bytes.fromhex( - "85a3636d64ab757365725f637265617465b26465766963655f6365727469666963617465c4" - "06666f6f626172bb72656461637465645f6465766963655f6365727469666963617465c0b9" - "72656461637465645f757365725f6365727469666963617465c406666f6f626172b0757365" - "725f6365727469666963617465c406666f6f626172" - ) - await alice_ws.send(raw_req) - raw_rep = await alice_ws.receive() - rep = authenticated_cmds.latest.user_create.Rep.load(raw_rep) - assert isinstance(rep, authenticated_cmds.latest.user_create.RepUnknownStatus) - assert rep.status == "invalid_msg_format" - - # Finally just make sure good was really good - rep = await user_create( - alice_ws, - user_certificate=user_certificate, - device_certificate=device_certificate, - redacted_user_certificate=redacted_user_certificate, - redacted_device_certificate=good_redacted_device_certificate.dump_and_sign( - alice.signing_key - ), - ) - assert isinstance(rep, UserCreateRepOk) - - -@pytest.mark.trio -async def test_user_create_bad_redacted_user_certificate(alice_ws, alice, mallory): - now = DateTime.now() - device_certificate = DeviceCertificate( - author=alice.device_id, - timestamp=now, - device_id=mallory.device_id, - device_label=mallory.device_label, - verify_key=mallory.verify_key, - ) - redacted_device_certificate = device_certificate.evolve(device_label=None) - redacted_device_certificate = redacted_device_certificate.dump_and_sign(alice.signing_key) - device_certificate = device_certificate.dump_and_sign(alice.signing_key) - user_certificate = UserCertificate( - author=alice.device_id, - timestamp=now, - user_id=mallory.user_id, - human_handle=mallory.human_handle, - public_key=mallory.public_key, - profile=UserProfile.STANDARD, - ) - good_redacted_user_certificate = 
user_certificate.evolve(human_handle=None) - user_certificate = user_certificate.dump_and_sign(alice.signing_key) - for bad_redacted_user_certificate in ( - good_redacted_user_certificate.evolve(timestamp=now.add(seconds=1)), - good_redacted_user_certificate.evolve(user_id=alice.user_id), - good_redacted_user_certificate.evolve(public_key=alice.public_key), - good_redacted_user_certificate.evolve(profile=UserProfile.OUTSIDER), - ): - rep = await user_create( - alice_ws, - user_certificate=user_certificate, - device_certificate=device_certificate, - redacted_user_certificate=bad_redacted_user_certificate.dump_and_sign( - alice.signing_key - ), - redacted_device_certificate=redacted_device_certificate, - ) - assert isinstance(rep, UserCreateRepInvalidData) - - # Missing redacted certificate is not allowed as well - # We should not be able to build an invalid request - # Generated from Python implementation (Parsec v2.11.1+dev) - # Content: - # cmd: "user_create" - # device_certificate: hex!("666f6f626172") - # redacted_device_certificate: hex!("666f6f626172") - # redacted_user_certificate: None - # user_certificate: hex!("666f6f626172") - # - raw_req = bytes.fromhex( - "85a3636d64ab757365725f637265617465b26465766963655f6365727469666963617465c4" - "06666f6f626172bb72656461637465645f6465766963655f6365727469666963617465c406" - "666f6f626172b972656461637465645f757365725f6365727469666963617465c0b0757365" - "725f6365727469666963617465c406666f6f626172" - ) - await alice_ws.send(raw_req) - raw_rep = await alice_ws.receive() - rep = authenticated_cmds.latest.user_create.Rep.load(raw_rep) - assert isinstance(rep, authenticated_cmds.latest.user_create.RepUnknownStatus) - assert rep.status == "invalid_msg_format" - - # Finally just make sure good was really good - rep = await user_create( - alice_ws, - user_certificate=user_certificate, - device_certificate=device_certificate, - redacted_user_certificate=good_redacted_user_certificate.dump_and_sign(alice.signing_key), - redacted_device_certificate=redacted_device_certificate, - ) - assert isinstance(rep, UserCreateRepOk) - - -@pytest.mark.trio -async def test_user_create_already_exists(alice_ws, alice, bob): - now = DateTime.now() - user_certificate = UserCertificate( - author=alice.device_id, - timestamp=now, - user_id=bob.user_id, - human_handle=alice.human_handle, - public_key=bob.public_key, - profile=UserProfile.STANDARD, - ) - redacted_user_certificate = user_certificate.evolve(human_handle=None) - device_certificate = DeviceCertificate( - author=alice.device_id, - timestamp=now, - device_id=bob.device_id, - device_label=alice.device_label, - verify_key=bob.verify_key, - ) - redacted_device_certificate = device_certificate.evolve(device_label=None) - - user_certificate = user_certificate.dump_and_sign(alice.signing_key) - redacted_user_certificate = redacted_user_certificate.dump_and_sign(alice.signing_key) - device_certificate = device_certificate.dump_and_sign(alice.signing_key) - redacted_device_certificate = redacted_device_certificate.dump_and_sign(alice.signing_key) - - rep = await user_create( - alice_ws, - user_certificate=user_certificate, - device_certificate=device_certificate, - redacted_user_certificate=redacted_user_certificate, - redacted_device_certificate=redacted_device_certificate, - ) - assert isinstance(rep, UserCreateRepAlreadyExists) - - -@pytest.mark.trio -async def test_user_create_human_handle_already_exists(alice_ws, alice, bob): - now = DateTime.now() - bob2_device_id = DeviceID("bob2@dev1") - user_certificate = 
UserCertificate( - author=alice.device_id, - timestamp=now, - user_id=bob2_device_id.user_id, - human_handle=bob.human_handle, - public_key=bob.public_key, - profile=UserProfile.STANDARD, - ) - redacted_user_certificate = user_certificate.evolve(human_handle=None) - device_certificate = DeviceCertificate( - author=alice.device_id, - timestamp=now, - device_id=bob2_device_id, - device_label=DeviceLabel("dev2"), - verify_key=bob.verify_key, - ) - redacted_device_certificate = device_certificate.evolve(device_label=None) - - user_certificate = user_certificate.dump_and_sign(alice.signing_key) - redacted_user_certificate = redacted_user_certificate.dump_and_sign(alice.signing_key) - device_certificate = device_certificate.dump_and_sign(alice.signing_key) - redacted_device_certificate = redacted_device_certificate.dump_and_sign(alice.signing_key) - - rep = await user_create( - alice_ws, - user_certificate=user_certificate, - device_certificate=device_certificate, - redacted_user_certificate=redacted_user_certificate, - redacted_device_certificate=redacted_device_certificate, - ) - assert isinstance(rep, UserCreateRepAlreadyExists) - - -@pytest.mark.trio -async def test_user_create_human_handle_with_revoked_previous_one( - alice_ws, alice, bob, backend_data_binder -): - # First revoke bob - await backend_data_binder.bind_revocation(user_id=bob.user_id, certifier=alice) - - # Now recreate another user with bob's human handle - now = DateTime.now() - bob2_device_id = DeviceID("bob2@dev1") - user_certificate = UserCertificate( - author=alice.device_id, - timestamp=now, - user_id=bob2_device_id.user_id, - human_handle=bob.human_handle, - public_key=bob.public_key, - profile=UserProfile.STANDARD, - ) - redacted_user_certificate = user_certificate.evolve(human_handle=None) - device_certificate = DeviceCertificate( - author=alice.device_id, - timestamp=now, - device_id=bob2_device_id, - device_label=bob.device_label, # Device label doesn't have to be unique - verify_key=bob.verify_key, - ) - redacted_device_certificate = device_certificate.evolve(device_label=None) - - user_certificate = user_certificate.dump_and_sign(alice.signing_key) - redacted_user_certificate = redacted_user_certificate.dump_and_sign(alice.signing_key) - device_certificate = device_certificate.dump_and_sign(alice.signing_key) - redacted_device_certificate = redacted_device_certificate.dump_and_sign(alice.signing_key) - - rep = await user_create( - alice_ws, - user_certificate=user_certificate, - device_certificate=device_certificate, - redacted_user_certificate=redacted_user_certificate, - redacted_device_certificate=redacted_device_certificate, - ) - assert isinstance(rep, UserCreateRepOk) - - -@pytest.mark.trio -async def test_user_create_not_matching_certified_on(alice_ws, alice, mallory): - date1 = DateTime(2000, 1, 1) - date2 = date1.add(seconds=1) - user_certificate = UserCertificate( - author=alice.device_id, - timestamp=date1, - user_id=mallory.user_id, - human_handle=mallory.human_handle, - public_key=mallory.public_key, - profile=UserProfile.STANDARD, - ).dump_and_sign(alice.signing_key) - device_certificate = DeviceCertificate( - author=alice.device_id, - timestamp=date2, - device_id=mallory.device_id, - device_label=mallory.device_label, - verify_key=mallory.verify_key, - ).dump_and_sign(alice.signing_key) - with freeze_time(date1): - rep = await user_create( - alice_ws, - user_certificate=user_certificate, - device_certificate=device_certificate, - redacted_user_certificate=user_certificate, - 
redacted_device_certificate=device_certificate, - ) - assert isinstance(rep, UserCreateRepInvalidData) - - -@pytest.mark.trio -async def test_user_create_certificate_too_old(alice_ws, alice, mallory): - too_old = DateTime(2000, 1, 1) - now = too_old.add(seconds=INVITATION_VALIDITY + 1) - user_certificate = UserCertificate( - author=alice.device_id, - timestamp=too_old, - user_id=mallory.user_id, - human_handle=mallory.human_handle, - public_key=mallory.public_key, - profile=UserProfile.STANDARD, - ).dump_and_sign(alice.signing_key) - device_certificate = DeviceCertificate( - author=alice.device_id, - timestamp=too_old, - device_id=mallory.device_id, - device_label=mallory.device_label, - verify_key=mallory.verify_key, - ).dump_and_sign(alice.signing_key) - - with freeze_time(now): - rep = await user_create( - alice_ws, - user_certificate=user_certificate, - device_certificate=device_certificate, - redacted_user_certificate=user_certificate, - redacted_device_certificate=device_certificate, - ) - assert isinstance(rep, UserCreateRepInvalidCertification) - - -@pytest.mark.trio -async def test_user_create_author_not_admin(backend_asgi_app, bob_ws): - # No need for a valid certificate given access rights should be - # checked before payload deserialization - rep = await user_create( - bob_ws, - user_certificate=b"", - device_certificate=b"", - redacted_user_certificate=b"", - redacted_device_certificate=b"", - ) - assert isinstance(rep, UserCreateRepNotAllowed) - - -@pytest.mark.trio -async def test_redacted_certificates_cannot_contain_sensitive_data(alice_ws, alice, mallory): - now = DateTime.now() - user_certificate = UserCertificate( - author=alice.device_id, - timestamp=now, - user_id=mallory.user_id, - human_handle=mallory.human_handle, - public_key=mallory.public_key, - profile=UserProfile.STANDARD, - ) - redacted_user_certificate = user_certificate.evolve(human_handle=None) - device_certificate = DeviceCertificate( - author=alice.device_id, - timestamp=now, - device_id=mallory.device_id, - device_label=mallory.device_label, - verify_key=mallory.verify_key, - ) - redacted_device_certificate = device_certificate.evolve(device_label=None) - - user_certificate = user_certificate.dump_and_sign(alice.signing_key) - device_certificate = device_certificate.dump_and_sign(alice.signing_key) - redacted_user_certificate = redacted_user_certificate.dump_and_sign(alice.signing_key) - redacted_device_certificate = redacted_device_certificate.dump_and_sign(alice.signing_key) - - with freeze_time(now): - rep = await user_create( - alice_ws, - user_certificate=user_certificate, - device_certificate=device_certificate, - redacted_user_certificate=user_certificate, - redacted_device_certificate=redacted_device_certificate, - ) - assert isinstance(rep, UserCreateRepInvalidData) - - rep = await user_create( - alice_ws, - user_certificate=user_certificate, - device_certificate=device_certificate, - redacted_user_certificate=redacted_user_certificate, - redacted_device_certificate=device_certificate, - ) - assert isinstance(rep, UserCreateRepInvalidData) diff --git a/server/tests/backend/user/test_user_revoke.py b/server/tests/backend/user/test_user_revoke.py deleted file mode 100644 index 5bdfbb56860..00000000000 --- a/server/tests/backend/user/test_user_revoke.py +++ /dev/null @@ -1,159 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -import pytest -import trio -from quart.testing.connections import
WebsocketDisconnectError - -from parsec._parsec import ( - DateTime, - UserProfile, -) -from parsec.api.data import RevokedUserCertificate -from parsec.api.protocol import ( - HandshakeRevokedDevice, - UserRevokeRepAlreadyRevoked, - UserRevokeRepInvalidCertification, - UserRevokeRepNotAllowed, - UserRevokeRepNotFound, - UserRevokeRepOk, -) -from parsec.backend.user import INVITATION_VALIDITY -from tests.backend.common import authenticated_ping, user_revoke -from tests.common import freeze_time - - -@pytest.mark.trio -async def test_backend_close_on_user_revoke( - backend_asgi_app, alice_ws, backend_authenticated_ws_factory, bob, alice -): - now = DateTime.now() - bob_revocation = RevokedUserCertificate( - author=alice.device_id, timestamp=now, user_id=bob.user_id - ).dump_and_sign(alice.signing_key) - - async with backend_authenticated_ws_factory(backend_asgi_app, bob) as bob_ws: - with backend_asgi_app.backend.event_bus.listen(): - rep = await user_revoke(alice_ws, revoked_user_certificate=bob_revocation) - assert isinstance(rep, UserRevokeRepOk) - # `user.revoked` event schedules connection cancellation, so wait - # for things to settle down to make sure the cancellation is done - await trio.testing.wait_all_tasks_blocked() - # Bob cannot send new commands - with pytest.raises(WebsocketDisconnectError): - await authenticated_ping(bob_ws) - - -@pytest.mark.trio -async def test_user_revoke_ok( - backend_asgi_app, backend_authenticated_ws_factory, adam_ws, alice, adam -): - now = DateTime.now() - alice_revocation = RevokedUserCertificate( - author=adam.device_id, timestamp=now, user_id=alice.user_id - ).dump_and_sign(adam.signing_key) - - with backend_asgi_app.backend.event_bus.listen(): - rep = await user_revoke(adam_ws, revoked_user_certificate=alice_revocation) - assert isinstance(rep, UserRevokeRepOk) - - # Alice cannot connect from now on...
- with pytest.raises(HandshakeRevokedDevice): - async with backend_authenticated_ws_factory(backend_asgi_app, alice): - pass - - -@pytest.mark.trio -async def test_user_revoke_not_admin( - backend_asgi_app, backend_authenticated_ws_factory, bob_ws, alice, bob -): - now = DateTime.now() - alice_revocation = RevokedUserCertificate( - author=bob.device_id, timestamp=now, user_id=alice.user_id - ).dump_and_sign(bob.signing_key) - - rep = await user_revoke(bob_ws, revoked_user_certificate=alice_revocation) - assert isinstance(rep, UserRevokeRepNotAllowed) - - -@pytest.mark.trio -async def test_cannot_self_revoke( - backend_asgi_app, backend_authenticated_ws_factory, alice_ws, alice -): - now = DateTime.now() - alice_revocation = RevokedUserCertificate( - author=alice.device_id, timestamp=now, user_id=alice.user_id - ).dump_and_sign(alice.signing_key) - - rep = await user_revoke(alice_ws, revoked_user_certificate=alice_revocation) - assert isinstance(rep, UserRevokeRepNotAllowed) - - -@pytest.mark.trio -async def test_user_revoke_unknown(backend_asgi_app, alice_ws, alice, mallory): - revoked_user_certificate = RevokedUserCertificate( - author=alice.device_id, timestamp=DateTime.now(), user_id=mallory.user_id - ).dump_and_sign(alice.signing_key) - - rep = await user_revoke(alice_ws, revoked_user_certificate=revoked_user_certificate) - assert isinstance(rep, UserRevokeRepNotFound) - - -@pytest.mark.trio -async def test_user_revoke_already_revoked(backend_asgi_app, alice_ws, bob, alice): - now = DateTime.now() - bob_revocation = RevokedUserCertificate( - author=alice.device_id, timestamp=now, user_id=bob.user_id - ).dump_and_sign(alice.signing_key) - - rep = await user_revoke(alice_ws, revoked_user_certificate=bob_revocation) - assert isinstance(rep, UserRevokeRepOk) - - rep = await user_revoke(alice_ws, revoked_user_certificate=bob_revocation) - assert isinstance(rep, UserRevokeRepAlreadyRevoked) - - -@pytest.mark.trio -async def test_user_revoke_invalid_certified(backend_asgi_app, alice_ws, alice2, bob): - revoked_user_certificate = RevokedUserCertificate( - author=alice2.device_id, timestamp=DateTime.now(), user_id=bob.user_id - ).dump_and_sign(alice2.signing_key) - - rep = await user_revoke(alice_ws, revoked_user_certificate=revoked_user_certificate) - assert isinstance(rep, UserRevokeRepInvalidCertification) - - -@pytest.mark.trio -async def test_user_revoke_certify_too_old(backend_asgi_app, alice_ws, alice, bob): - now = DateTime(2000, 1, 1) - revoked_user_certificate = RevokedUserCertificate( - author=alice.device_id, timestamp=now, user_id=bob.user_id - ).dump_and_sign(alice.signing_key) - - with freeze_time(now.add(seconds=INVITATION_VALIDITY + 1)): - rep = await user_revoke(alice_ws, revoked_user_certificate=revoked_user_certificate) - assert isinstance(rep, UserRevokeRepInvalidCertification) - - -@pytest.mark.trio -async def test_user_revoke_other_organization( - ws_from_other_organization_factory, - backend_authenticated_ws_factory, - backend_asgi_app, - alice, - bob, -): - # Organizations should be isolated even for organization admins - async with ws_from_other_organization_factory( - backend_asgi_app, mimic=alice.device_id, profile=UserProfile.ADMIN - ) as sock: - revocation = RevokedUserCertificate( - author=sock.device.device_id, timestamp=DateTime.now(), user_id=bob.user_id - ).dump_and_sign(sock.device.signing_key) - - rep = await user_revoke(sock, revoked_user_certificate=revocation) - assert isinstance(rep, UserRevokeRepNotFound) - - # Make sure bob still works - async with 
backend_authenticated_ws_factory(backend_asgi_app, bob): - pass diff --git a/server/tests/common/__init__.py b/server/tests/common/__init__.py index 6ad817a53d4..abb3e6d8d9e 100644 --- a/server/tests/common/__init__.py +++ b/server/tests/common/__init__.py @@ -2,14 +2,17 @@ from __future__ import annotations from .backend import * # noqa -from .binder import * # noqa +from .client import * # noqa + +# from .binder import * from .event_bus_spy import * # noqa -from .fixtures_customisation import * # noqa -from .freeze_time import * # noqa -from .helpers import * # noqa -from .oracles import * # noqa -from .population import * # noqa + +# from .fixtures_customisation import * +# from .freeze_time import * +# from .helpers import * +# from .oracles import * +# from .population import * from .postgresql import * # noqa -from .rpc_api import * # noqa -from .sequester import * # noqa -from .trio_clock import * # noqa + +# from .rpc_api import * +# from .sequester import * diff --git a/server/tests/common/backend.py b/server/tests/common/backend.py index 0b7d4cf5f91..3c64402aa05 100644 --- a/server/tests/common/backend.py +++ b/server/tests/common/backend.py @@ -1,699 +1,72 @@ # Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations -import json -import socket -import ssl -import sys -import tempfile -import threading -from contextlib import asynccontextmanager, contextmanager -from dataclasses import dataclass -from functools import partial -from inspect import iscoroutine -from itertools import chain -from typing import AsyncContextManager, Callable, Optional, Union +import asyncio +from typing import AsyncGenerator import pytest -import trio -import trustme -from hypercorn.config import Config as HyperConfig -from hypercorn.config import Sockets -from hypercorn.trio.run import worker_serve -from hypercorn.trio.tcp_server import TCPServer -from hypercorn.trio.worker_context import WorkerContext -from quart.typing import TestClientProtocol -from quart_trio import QuartTrio -from parsec._parsec import ( - BackendAddr, - BackendEventOrganizationExpired, - BackendInvitationAddr, - InvitationType, -) -from parsec.backend import backend_app_factory -from parsec.backend.app import BackendApp -from parsec.backend.asgi import app_factory -from parsec.backend.config import BackendConfig, MockedBlockStoreConfig, MockedEmailConfig -from tests.common.binder import LocalDevice, OrganizationFullData -from tests.common.freeze_time import freeze_time -from tests.common.trio_clock import real_clock_timeout - - -@pytest.fixture(scope="session") -def unused_tcp_port(): - """Find an unused localhost TCP port from 1024-65535 and return it.""" - sock = socket.socket() - sock.bind(("127.0.0.1", 0)) - - # On macOS connecting to a bind-but-not-listening socket hangs. - # On Windows it doesn't hang but induces a couple of seconds of lag.
- # So on those platforms we actually serve the port, only to right away close the - # client connection which is a "good enough" emulation of an unused port - if sys.platform in ("darwin", "win32"): - - def _broken_server(sock: socket.socket): - try: - sock.listen() - while True: - client_sock, _ = sock.accept() - client_sock.close() - except OSError: - # Sock has been closed, the tests are over - return - - threading.Thread(target=_broken_server, args=[sock], daemon=False).start() - - port = sock.getsockname()[1] - yield port - sock.close() - - -def correct_addr( - to_correct: Union[BackendAddr, LocalDevice, OrganizationFullData], port: int -) -> BackendAddr: - """ - Helper to fix a backend address so that it will reach the current server. - This is not needed when using `running_backend` (given in this case the - alice/bob/coolorg etc. fixtures are created with the correct port), but - must be used when the test has to manually start the server (e.g. in - the hypothesis tests) - """ - if isinstance(to_correct, LocalDevice): - return LocalDevice( - organization_addr=correct_addr(to_correct.organization_addr, port), - device_id=to_correct.device_id, - device_label=to_correct.device_label, - human_handle=to_correct.human_handle, - signing_key=to_correct.signing_key, - private_key=to_correct.private_key, - profile=to_correct.profile, - user_realm_id=to_correct.user_realm_id, - user_realm_key=to_correct.user_realm_key, - local_symkey=to_correct.local_symkey, - ) - elif isinstance(to_correct, OrganizationFullData): - return OrganizationFullData( - bootstrap_addr=correct_addr(to_correct.bootstrap_addr, port), - addr=correct_addr(to_correct.addr, port), - root_signing_key=to_correct.root_signing_key, - ) - else: - # Consider it's a regular addr - *_, to_keep = to_correct.to_url().removeprefix("parsec://").split("/", 1) - url = f"parsec://127.0.0.1:{port}/{to_keep}" - return to_correct.__class__.from_url(url) - - -@asynccontextmanager -async def server_factory(handle_client: Callable): - async with trio.open_service_nursery() as nursery: - listeners = await nursery.start( - partial( - trio.serve_tcp, - handle_client, - port=0, - host="127.0.0.1", - handler_nursery=nursery, - ) - ) - yield listeners[0].socket.getsockname()[1] - - nursery.cancel_scope.cancel() - - -BackendFactory = Callable[..., AsyncContextManager[BackendApp]] - - -@pytest.fixture -def backend_factory( - asyncio_loop, - event_bus_factory, - backend_data_binder_factory, - coolorg, - expiredorg, - other_org, - alice, - alice2, - expired_org_alice, - other_alice, - adam, - bob, - blockstore, - backend_store, - fixtures_customization, -) -> BackendFactory: - # Given the postgresql driver uses trio-asyncio, any coroutine dealing with - # the backend should inherit from the one with the asyncio loop context manager. - # This means the nursery fixture cannot use the backend object, otherwise we - # can end up in a deadlock if the asyncio loop is torn down before the - # nursery fixture is done with calling the backend's postgresql stuff.
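# A minimal standalone sketch (hypothetical fixture names, not part of this file)
# of the ordering guarantee relied upon here: pytest finalizes fixtures in reverse
# dependency order, so declaring `asyncio_loop` as a parameter of `backend_factory`
# is what keeps the asyncio loop alive until the backend teardown is done.
import pytest

@pytest.fixture
def loop_sketch():
    print("loop: setup")
    yield
    print("loop: teardown")  # Finalized last: its dependents are finalized first

@pytest.fixture
def backend_sketch(loop_sketch):  # The parameter pins the teardown order
    print("backend: setup")
    yield
    print("backend: teardown")  # Finalized before `loop_sketch`

def test_teardown_order(backend_sketch):
    pass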
- - @asynccontextmanager - async def _backend_factory( - populated=True, config={}, event_bus=None - ) -> AsyncContextManager[BackendApp]: - nonlocal backend_store, blockstore - if fixtures_customization.get("backend_force_mocked"): - backend_store = "MOCKED" - assert fixtures_customization.get("blockstore_mode", "NO_RAID") == "NO_RAID" - blockstore = MockedBlockStoreConfig() - - config = BackendConfig( - **{ - "administration_token": "s3cr3t", - "db_min_connections": 1, - "db_max_connections": 5, - "debug": True, - "db_url": backend_store, - "sse_keepalive": 30, - "blockstore_config": blockstore, - "email_config": None, - "backend_addr": None, - "forward_proto_enforce_https": None, - "organization_bootstrap_webhook_url": None, - "organization_spontaneous_bootstrap": False, - **config, - } - ) - - if not event_bus: - event_bus = event_bus_factory() - async with backend_app_factory(config, event_bus=event_bus) as backend: - if populated: - with freeze_time("2000-01-01"): - binder = backend_data_binder_factory(backend) - await binder.bind_organization( - coolorg, - alice, - initial_user_manifest=fixtures_customization.get( - "alice_initial_remote_user_manifest", "v1" - ), - ) - await binder.bind_organization(expiredorg, expired_org_alice) - with backend.event_bus.listen() as spy: - await backend.organization.update( - expiredorg.organization_id, is_expired=True - ) - await spy.wait_with_timeout(BackendEventOrganizationExpired) - await binder.bind_organization(other_org, other_alice) - await binder.bind_device(alice2, certifier=alice) - await binder.bind_device( - adam, - certifier=alice2, - initial_user_manifest=fixtures_customization.get( - "adam_initial_remote_user_manifest", "v1" - ), - ) - await binder.bind_device( - bob, - certifier=adam, - initial_user_manifest=fixtures_customization.get( - "bob_initial_remote_user_manifest", "v1" - ), - ) - if fixtures_customization.get("adam_is_revoked", False): - await binder.bind_revocation(adam.user_id, certifier=alice) - - yield backend - - return _backend_factory - - -@pytest.fixture -async def backend(unused_tcp_port, backend_factory, fixtures_customization, backend_addr): - populated = not fixtures_customization.get("backend_not_populated", False) - config = {} - tmpdir = tempfile.mkdtemp(prefix="tmp-email-folder-") - config["email_config"] = MockedEmailConfig(sender="Parsec <no-reply@parsec.com>", tmpdir=tmpdir) - config["backend_addr"] = backend_addr - if fixtures_customization.get("backend_spontaneous_organization_bootstrap", False): - config["organization_spontaneous_bootstrap"] = True - if fixtures_customization.get("backend_has_webhook", False): - # Invalid port, hence we should crash if by mistake we try to reach this url - config["organization_bootstrap_webhook_url"] = f"http://127.0.0.1:{unused_tcp_port}/webhook" - forward_proto_enforce_https = fixtures_customization.get("backend_forward_proto_enforce_https") - if forward_proto_enforce_https: - config["forward_proto_enforce_https"] = forward_proto_enforce_https - async with backend_factory(populated=populated, config=config) as backend: - yield backend - - -@pytest.fixture -def backend_data_binder(backend, backend_data_binder_factory): - return backend_data_binder_factory(backend) - - -class LetterBox: - def __init__(self): - self._send_email, self._recv_email = trio.open_memory_channel(10) - self.emails = [] - - async def get_next_with_timeout(self): - async with real_clock_timeout(): - return await self.get_next() - - async def get_next(self): - return await self._recv_email.receive() - - def
_push(self, to_addr, message): - email = (to_addr, message) - self._send_email.send_nowait(email) - self.emails.append(email) - - -@pytest.fixture -def email_letterbox(monkeypatch): - letterbox = LetterBox() - - async def _mocked_send_email(email_config, to_addr, message): - letterbox._push(to_addr, message) - - monkeypatch.setattr("parsec.backend.invite.send_email", _mocked_send_email) - return letterbox - - -@pytest.fixture -def webhook_spy(monkeypatch): - events = [] - - class MockedRep: - @property - def status(self): - return 200 - - @contextmanager - def _mock_urlopen(req, **kwargs): - # Webhooks are always POST with a utf-8 JSON body - assert req.method == "POST" - assert req.headers == {"Content-type": "application/json; charset=utf-8"} - cooked_data = json.loads(req.data.decode("utf-8")) - events.append((req.full_url, cooked_data)) - yield MockedRep() - - monkeypatch.setattr("parsec.backend.webhooks.urlopen", _mock_urlopen) - return events - - -# `running_backend` is a really useful fixture, but comes with its own issues: -# - the TCP port on which the backend is running is only known once we have started the server -# - we need the TCP port to determine the backend address -# - we need the backend address to create the alice/bob/coolorg etc. fixtures -# - those fixtures are needed to populate the backend -# - the running_backend fixture should only resolve once the backend is populated and running -# -# So to break this dependency loop, we introduce the `running_backend_sockets` -# fixture which starts a server (hence getting its TCP port) but allows us to plug the -# actual backend server code later on. -# -# On top of that, we have the `running_backend_ready` fixture that allows other fixtures (typically -# the core related ones) to be able to ensure the server is up and running (the fixtures -# cannot directly depend on the `running_backend` fixture for this given it should be -# up to the test to decide whether or not the backend should be running) -# -# In case `running_backend` is among the test's fixtures: -# -# ├─ running_backend -# | ├─ backend -# | | └─ backend_factory -# | | └─ alice/bob/coolorg etc. -# | | └─ backend_addr <- wait for port to be known -# | | └─ running_backend_port_known -# | ├─ running_backend_sockets <- signal port is known -# | | └─ running_backend_port_known -# | └─ running_backend_ready -# └─ core <- wait for running_backend before its own init -# └─ running_backend_ready -# -# In case `running_backend` is not among the test's fixtures: -# -# ├─ core -# | └─ running_backend_ready <- nothing to wait for ! -# └─ alice/bob/coolorg etc.
-# └─ backend_addr -# └─ running_backend_port_known -# └─ unused_tcp_port - - -@pytest.fixture -def running_backend_ready(request): - # Useful to synchronize other fixtures that need to connect to - # the backend if it is available - event = trio.Event() - - # Called by `running_backend` fixture once port is known and ASGI app resolved - def _set_running_backend_ready() -> None: - assert not event.is_set() - event.set() - - async def _wait_running_backend_ready() -> None: - await event.wait() - - # Nothing to wait for if current test doesn't use `running_backend` fixture - if "running_backend" not in request.fixturenames: - _set_running_backend_ready() - - # Only accessed by `running_backend` fixture - _wait_running_backend_ready._set_running_backend_ready = _set_running_backend_ready - - return _wait_running_backend_ready - - -@pytest.fixture -def running_backend_port_known(request, unused_tcp_port): - # Useful to synchronize other fixtures that need to connect to - # the backend if it is available - # This is also needed to create a backend addr with a valid port - event = trio.Event() - backend_port = None - - # Called by `running_backend` fixture once port is known - def _set_running_backend_port_known(port: int) -> None: - nonlocal backend_port - assert not event.is_set() - event.set() - backend_port = port - - async def _wait_running_backend_port_known() -> int: - nonlocal backend_port - await event.wait() - assert backend_port is not None - return backend_port - - # Nothing to wait for if current test doesn't use `running_backend` fixture - if "running_backend" not in request.fixturenames: - _set_running_backend_port_known(unused_tcp_port) - - # Only accessed by `running_backend` fixture - _wait_running_backend_port_known._set_running_backend_port_known = ( - _set_running_backend_port_known - ) - - return _wait_running_backend_port_known - - -@pytest.fixture -async def async_fixture_backend_addr(running_backend_port_known, fixtures_customization): - port = await running_backend_port_known() - use_ssl = fixtures_customization.get("backend_over_ssl", False) - return BackendAddr(hostname="127.0.0.1", port=port, use_ssl=use_ssl) - - -@pytest.fixture -def backend_addr(async_fixture_backend_addr, unused_tcp_port): - # Given the server port is not known until `running_backend_sockets` - # is ready, `backend_addr` should be an asynchronous fixture. - # However `backend_addr` is a really common fixture and we want to be able - # to use it even in the non-trio tests (for instance in hypothesis tests). - # So we cheat by pretending `backend_addr` is a sync fixture and fall back to - # a default addr value if we are in a non-trio test.
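# A quick self-contained illustration (hypothetical names) of the trick used just
# below: calling an async function without awaiting it merely returns a coroutine
# object, which is how a sync fixture can detect it received the async flavour.
from inspect import iscoroutine

async def make_addr():
    return "parsec://127.0.0.1:1234/whatever"

maybe_addr = make_addr()   # Not awaited: this is a coroutine object, not an addr
assert iscoroutine(maybe_addr)
maybe_addr.close()         # Close it to silence the "never awaited" warning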
- if iscoroutine(async_fixture_backend_addr): - # We are in a non-trio test, just close the coroutine and provide - # an addr which is guaranteed to cause a connection error - async_fixture_backend_addr.close() - # `use_ssl=False` is useful if this address is later modified by `correct_addr` - return BackendAddr(hostname="127.0.0.1", port=unused_tcp_port, use_ssl=False) - else: - return async_fixture_backend_addr - - -# Generating the CA & certificate is costly, so cache them once created -_ca_and_cert = None - - -def get_ca_and_cert(): - global _ca_and_cert - if _ca_and_cert is None: - _ca = trustme.CA() - _cert = _ca.issue_cert("127.0.0.1") - _ca_and_cert = (_ca, _cert) - return _ca_and_cert - - -class AsgiOfflineMiddleware: - """ - Wrap an ASGI app to be able to simulate it being offline - """ - - def __init__(self, asgi_app): - self.asgi_app = asgi_app - self._offline_ports = set() - self._offline_watchdogs_parking = trio.lowlevel.ParkingLot() - - async def __call__(self, scope, receive, send): - # Special case for lifespan given it corresponds to server init and not - # to an incoming client connection - if scope["type"] == "lifespan": - return await self.asgi_app(scope, receive, send) - - port = scope["server"][1] - - if port in self._offline_ports: - # In case of http, hypercorn won't let us close the connection (i.e. it will - # answer an HTTP 500 for us) without returning a valid http response :'( - # So we return a 503 which is the closest thing to offline we can do and - # rely on ad-hoc code in the client implementation - if scope["type"] == "http": - await send({"type": "http.response.start", "status": 503}) - await send({"type": "http.response.body"}) - return - - else: - - async def _offline_watchdog(cancel_scope): - while True: - await self._offline_watchdogs_parking.park() - if port in self._offline_ports: - break - cancel_scope.cancel() - - response_sent = False - async with trio.open_nursery() as nursery: - nursery.start_soon(_offline_watchdog, nursery.cancel_scope) - await self.asgi_app(scope, receive, send) - response_sent = True - nursery.cancel_scope.cancel() - if not response_sent and scope["type"] == "http": - # Must send an HTTP response, see comment in the offline part. - # (note Hypercorn accepts that we send multiple `http.response.start` - # so concurrency issues with what occurred in `self.asgi_app` don't - # cause any issue) - await send({"type": "http.response.start", "status": 503}) - await send({"type": "http.response.body"}) - - @contextmanager - def offline(self, port: int): - assert port not in self._offline_ports - self._offline_ports.add(port) - self._offline_watchdogs_parking.unpark_all() - try: - yield - finally: - self._offline_ports.remove(port) - # No need to unpark given our port has just been re-authorized ! - - def __getattr__(self, val): - return getattr(self.asgi_app, val) - - -@pytest.fixture -def hyper_config(monkeypatch, fixtures_customization): - # Create an ssl certificate and overload default ssl context generation - if fixtures_customization.get("backend_over_ssl", False): - ca, cert = get_ca_and_cert() - vanilla_create_default_context = ssl.create_default_context - - def patched_create_default_context(*args, **kwargs): - ctx = vanilla_create_default_context(*args, **kwargs) - ca.configure_trust(ctx) - cert.configure_cert(ctx) # TODO: only server should load this part ?
- return ctx - - monkeypatch.setattr("ssl.create_default_context", patched_create_default_context) - - use_ssl = True - else: - use_ssl = False - - # Create the empty ASGI app and serve it on TCP - hyper_config = HyperConfig.from_mapping( - { - "bind": [f"127.0.0.1:0"], - # "accesslog": logging.getLogger("hypercorn.access"), - # "errorlog": logging.getLogger("hypercorn.error"), - # "certfile": str(ssl_certfile) if ssl_certfile else None, - # "keyfile": str(ssl_keyfile) if ssl_certfile else None, - } +from parsec._parsec import BackendAddr +from parsec.asgi import AsgiApp, asgi_app_factory +from parsec.backend import Backend, backend_factory +from parsec.cli.testbed import TestbedBackend +from parsec.config import BackendConfig, BaseBlockStoreConfig, MockedEmailConfig + +SERVER_DOMAIN = "parsec.invalid" + + +@pytest.fixture +def backend_config( + tmpdir: str, db_url: str, blockstore_config: BaseBlockStoreConfig +) -> BackendConfig: + return BackendConfig( + debug=True, + db_url=db_url, + db_min_connections=1, + db_max_connections=1, + sse_keepalive=30, + forward_proto_enforce_https=None, + backend_addr=BackendAddr(hostname=SERVER_DOMAIN, port=None, use_ssl=True), + email_config=MockedEmailConfig("no-reply@parsec.com", tmpdir), + blockstore_config=blockstore_config, + administration_token="s3cr3t", + organization_spontaneous_bootstrap=False, + organization_bootstrap_webhook_url=None, ) - # Sanity checks - assert hyper_config.ssl_enabled is use_ssl - return hyper_config @pytest.fixture -async def running_backend_sockets(hyper_config, running_backend_port_known): - try: - sockets = hyper_config.create_sockets() - # Sanity checks - if hyper_config.ssl_enabled: - assert len(sockets.secure_sockets) == 1 - assert len(sockets.insecure_sockets) == 0 - sock = sockets.secure_sockets[0] - else: - assert len(sockets.secure_sockets) == 0 - assert len(sockets.insecure_sockets) == 1 - sock = sockets.insecure_sockets[0] - - sock.listen(hyper_config.backlog) - port = sock.getsockname()[1] +async def backend(backend_config: BackendConfig) -> AsyncGenerator[Backend, None]: + # pytest-asyncio uses different coroutines to run the init and teardown parts + # of async generator fixtures. + # However anyio's task groups require their async context manager init + # and teardown to run from the same coroutine. + # So the solution is to start a dedicated task.
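# A minimal self-contained sketch of the "drive the context manager from a
# dedicated task" pattern used by the `backend` fixture above (`resource` is a
# hypothetical stand-in for `backend_factory`):
import asyncio
from contextlib import asynccontextmanager

@asynccontextmanager
async def resource():
    yield "ready"  # Setup happens before the yield, teardown after

async def main():
    started = asyncio.Event()
    should_stop = asyncio.Event()
    value = None

    async def _run():
        nonlocal value
        # Both __aenter__ and __aexit__ run inside this single coroutine
        async with resource() as value:
            started.set()
            await should_stop.wait()

    task = asyncio.create_task(_run())
    await started.wait()   # From here the resource is usable by the caller
    assert value == "ready"
    should_stop.set()      # Ask the dedicated task to run the teardown
    await task

asyncio.run(main())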
- except Exception: - # The fixture is broken, we must resolve `running_backend_port_known` - # before leaving, otherwise pytest-trio will hang forever given it waits - # for all fixtures to finish initialization before re-raising exceptions - running_backend_port_known._set_running_backend_port_known(9999) - raise + started = asyncio.Event() + should_stop = asyncio.Event() + backend = None - else: - running_backend_port_known._set_running_backend_port_known(port) - - return sockets - - -@pytest.fixture -def backend_asgi_app(backend): - # This fixture doesn't seem like much, but it helps make sure we have only one - # instance of the ASGI server - return AsgiOfflineMiddleware(app_factory(backend)) + async def _run_backend(): + nonlocal backend + async with backend_factory(config=backend_config) as backend: + started.set() + await should_stop.wait() + task = asyncio.create_task(_run_backend()) + await started.wait() + assert isinstance(backend, Backend) -def asgi_app_handle_client_factory(asgi_app: QuartTrio) -> Callable: - asgi_config = HyperConfig() - asgi_worker_context = WorkerContext() + yield backend - async def _handle_client(stream: trio.abc.Stream) -> None: - return await TCPServer( - app=asgi_app, config=asgi_config, context=asgi_worker_context, stream=stream - ) - - return _handle_client - - -@pytest.fixture -def backend_asgi_app_handle_client(backend_asgi_app): - return asgi_app_handle_client_factory(backend_asgi_app) + should_stop.set() + await task @pytest.fixture -def asgi_server_factory(hyper_config): - @asynccontextmanager - async def asgi_server_factory(asgi_app): - async with trio.open_nursery() as nursery: - yield await nursery.start(partial(worker_serve, app=asgi_app, config=hyper_config)) - nursery.cancel_scope.cancel() - - return asgi_server_factory - - -@dataclass -class RunningBackend: - asgi_app: QuartTrio - backend: BackendApp - addr: BackendAddr - - def offline(self, port: Optional[int] = None): - return self.asgi_app.offline(port or self.addr.port) - - def correct_addr(self, addr: BackendAddr | LocalDevice) -> BackendAddr: - return correct_addr(addr, self.addr.port) - - async def connection_factory(self) -> trio.abc.Stream: - return await trio.open_tcp_stream(self.addr.hostname, self.addr.port) - - def test_client(self, use_cookies: bool = True) -> TestClientProtocol: - """Creates and returns a test client""" - return self.asgi_app.test_client(use_cookies) - - -RunningBackendFactory = Callable[..., AsyncContextManager[RunningBackend]] @pytest.fixture -def running_backend_factory(asyncio_loop, hyper_config) -> RunningBackendFactory: - # `asyncio_loop` is already declared by `backend_factory` (since it's only the - # backend that needs an asyncio loop for postgresql stuff), however this is not - # enough here given we create the server *before* `backend_factory` is required - - @asynccontextmanager - async def _running_backend_factory( - backend: BackendApp, sockets: Optional[Sockets] = None - ) -> AsyncContextManager[RunningBackend]: - if not sockets: - sockets = hyper_config.create_sockets() - for sock in sockets.secure_sockets: - sock.listen(hyper_config.backlog) - for sock in sockets.insecure_sockets: - sock.listen(hyper_config.backlog) - - asgi_app = AsgiOfflineMiddleware(app_factory(backend)) - async with trio.open_nursery() as nursery: - binds = await nursery.start( - partial(worker_serve, app=asgi_app, config=hyper_config, sockets=sockets) - ) - port = int(binds[0].rsplit(":", 1)[1]) - -
yield RunningBackend( - asgi_app=asgi_app, - backend=backend, - addr=BackendAddr(hostname="127.0.0.1", port=port, use_ssl=hyper_config.ssl_enabled), - ) - - # TODO: remove me once https://github.com/pgjones/hypercorn/issues/106 is fixed - # tl;dr: Hypercorn doesn't call `socket.shutdown()` when cancelled which makes - # the socket wait for the incoming packets to be processed before closing it.. - # which leads to a deadlock given the server supposed to do that has been cancelled ! - for s in chain(sockets.insecure_sockets, sockets.secure_sockets): - try: - s.shutdown(socket.SHUT_RDWR) # I'm done asking nicely - except OSError: - pass - nursery.cancel_scope.cancel() - - return _running_backend_factory - - -@pytest.fixture -async def running_backend( - running_backend_sockets, running_backend_ready, running_backend_factory, backend -): - async with running_backend_factory(backend, sockets=running_backend_sockets) as rb: - running_backend_ready._set_running_backend_ready() - yield rb - - -@pytest.fixture -async def alice_new_device_invitation(backend, alice): - invitation = await backend.invite.new_for_device( - organization_id=alice.organization_id, greeter_user_id=alice.user_id - ) - return BackendInvitationAddr.build( - backend_addr=alice.organization_addr.get_backend_addr(), - organization_id=alice.organization_id, - invitation_type=InvitationType.DEVICE, - token=invitation.token, - ) - - -@pytest.fixture -async def zack_new_user_invitation(backend, alice): - invitation = await backend.invite.new_for_user( - organization_id=alice.organization_id, - greeter_user_id=alice.user_id, - claimer_email="zack@example.com", - ) - return BackendInvitationAddr.build( - backend_addr=alice.organization_addr.get_backend_addr(), - organization_id=alice.organization_id, - invitation_type=InvitationType.USER, - token=invitation.token, - ) +def testbed(backend: Backend) -> TestbedBackend: + return TestbedBackend(backend) diff --git a/server/tests/common/binder.py b/server/tests/common/binder.py deleted file mode 100644 index ed7239b1766..00000000000 --- a/server/tests/common/binder.py +++ /dev/null @@ -1,517 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -from dataclasses import dataclass -from functools import partial - -import pytest - -from parsec._parsec import ( - BackendEventRealmRolesUpdated, - BackendEventRealmVlobsUpdated, - BackendOrganizationAddr, - BackendOrganizationBootstrapAddr, - DateTime, - DeviceID, - DeviceLabel, - HumanHandle, - OrganizationID, - PrivateKey, - PublicKey, - RealmRole, - SecretKey, - SigningKey, - UserID, - UserProfile, - VerifyKey, - VlobID, -) -from parsec.api.data import ( - DeviceCertificate, - RealmRoleCertificate, - RevokedUserCertificate, - UserCertificate, - UserManifest, -) -from parsec.backend.organization import SequesterAuthority -from parsec.backend.realm import RealmGrantedRole -from parsec.backend.user import Device as BackendDevice -from parsec.backend.user import User as BackendUser -from parsec.backend.vlob import VlobSequesterServiceInconsistencyError -from tests.common.sequester import SequesterAuthorityFullData - - -@dataclass -class LocalDevice: - organization_addr: BackendOrganizationAddr - device_id: DeviceID - device_label: DeviceLabel | None - human_handle: HumanHandle | None - signing_key: SigningKey - private_key: PrivateKey - profile: UserProfile - user_realm_id: VlobID - user_realm_key: SecretKey - local_symkey: SecretKey - - @property - def 
organization_id(self) -> OrganizationID: - return self.organization_addr.organization_id - - @property - def root_verify_key(self) -> VerifyKey: - return self.organization_addr.root_verify_key - - @property - def user_id(self) -> UserID: - return self.device_id.user_id - - @property - def public_key(self) -> PublicKey: - return self.private_key.public_key - - @property - def verify_key(self) -> VerifyKey: - return self.signing_key.verify_key - - def timestamp(self) -> DateTime: - return DateTime.now() - - @classmethod - def generate_new_device( - cls, - organization_addr: BackendOrganizationAddr, - profile: UserProfile, - device_id: DeviceID | None = None, - human_handle: HumanHandle | None = None, - device_label: DeviceLabel | None = None, - signing_key: SigningKey | None = None, - private_key: PrivateKey | None = None, - ) -> LocalDevice: - return cls( - organization_addr=organization_addr, - device_id=device_id or DeviceID.new(), - device_label=device_label, - human_handle=human_handle, - signing_key=signing_key or SigningKey.generate(), - private_key=private_key or PrivateKey.generate(), - profile=profile, - user_realm_id=VlobID.new(), - user_realm_key=SecretKey.generate(), - local_symkey=SecretKey.generate(), - ) - - -@dataclass -class OrganizationFullData: - bootstrap_addr: BackendOrganizationBootstrapAddr - addr: BackendOrganizationAddr - root_signing_key: SigningKey - sequester_authority: SequesterAuthorityFullData | None - - @property - def bootstrap_token(self): - return self.bootstrap_addr.token - - @property - def root_verify_key(self): - return self.root_signing_key.verify_key - - @property - def organization_id(self): - return self.addr.organization_id - - -class InitialUserManifestState: - def __init__(self): - self._v1: dict[tuple[OrganizationID, UserID], UserManifest] = {} - - def _generate_or_retrieve_user_manifest_v1(self, device: LocalDevice) -> UserManifest: - try: - return self._v1[(device.organization_id, device.user_id)] - - except KeyError: - timestamp = device.timestamp() - remote_user_manifest = UserManifest( - author=device.device_id, - timestamp=timestamp, - id=device.user_realm_id, - version=1, - created=timestamp, - updated=timestamp, - last_processed_message=0, - workspaces=[], - ) - self._v1[(device.organization_id, device.user_id)] = remote_user_manifest - return self._v1[(device.organization_id, device.user_id)] - - def force_user_manifest_v1_generation(self, device): - self._generate_or_retrieve_user_manifest_v1(device) - - def get_user_manifest_v1_for_backend(self, device: LocalDevice) -> UserManifest: - return self._generate_or_retrieve_user_manifest_v1(device) - - -@pytest.fixture -def initial_user_manifest_state() -> InitialUserManifestState: - # User manifest is stored in backend vlob and in devices's local db. - # Hence this fixture allow us to centralize the first version of this user - # manifest. - # In most tests we want to be in a state were backend and devices all - # store the same user manifest (named the "v1" here). - # But sometime we want a completely fresh start ("v1" doesn't exist, - # hence devices and backend are empty) or only a single device to begin - # with no knowledge of the "v1". 
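
A hypothetical test makes the contract described above concrete: the same (organization, user) pair always resolves to the same cached v1 manifest, so backend and device fixtures can be populated consistently (fixture names are the ones from this file):

```python
def test_user_manifest_v1_is_centralized(initial_user_manifest_state, alice):
    # Both calls hit the same cache entry keyed on (organization_id, user_id)
    m1 = initial_user_manifest_state.get_user_manifest_v1_for_backend(alice)
    m2 = initial_user_manifest_state.get_user_manifest_v1_for_backend(alice)
    assert m1 is m2
```
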
- return InitialUserManifestState() - - -def local_device_to_backend_user( - device: LocalDevice, - certifier: LocalDevice | OrganizationFullData, - timestamp: DateTime | None = None, -) -> tuple[BackendUser, BackendDevice]: - if isinstance(certifier, OrganizationFullData): - certifier_id = None - certifier_signing_key = certifier.root_signing_key - else: - certifier_id = certifier.device_id - certifier_signing_key = certifier.signing_key - - timestamp = timestamp or device.timestamp() - - user_certificate = UserCertificate( - author=certifier_id, - timestamp=timestamp, - user_id=device.user_id, - public_key=device.public_key, - profile=device.profile, - human_handle=device.human_handle, - ) - device_certificate = DeviceCertificate( - author=certifier_id, - timestamp=timestamp, - device_id=device.device_id, - device_label=device.device_label, - verify_key=device.verify_key, - ) - redacted_user_certificate = user_certificate.evolve(human_handle=None) - redacted_device_certificate = device_certificate.evolve(device_label=None) - - user = BackendUser( - user_id=device.user_id, - human_handle=device.human_handle, - initial_profile=device.profile, - user_certificate=user_certificate.dump_and_sign(certifier_signing_key), - redacted_user_certificate=redacted_user_certificate.dump_and_sign(certifier_signing_key), - user_certifier=certifier_id, - created_on=timestamp, - ) - - first_device = BackendDevice( - device_id=device.device_id, - device_label=device.device_label, - device_certificate=device_certificate.dump_and_sign(certifier_signing_key), - redacted_device_certificate=redacted_device_certificate.dump_and_sign( - certifier_signing_key - ), - device_certifier=certifier_id, - created_on=timestamp, - ) - - return user, first_device - - -class CertificatesStore: - def __init__(self): - self._user_certificates = {} - self._device_certificates = {} - self._revoked_user_certificates = {} - - def store_user(self, organization_id, user_id, certif, redacted_certif): - key = (organization_id, user_id) - assert key not in self._user_certificates - self._user_certificates[key] = (certif, redacted_certif) - - def store_device(self, organization_id, device_id, certif, redacted_certif): - key = (organization_id, device_id) - assert key not in self._device_certificates - self._device_certificates[key] = (certif, redacted_certif) - - def store_revoked_user(self, organization_id, user_id, certif): - key = (organization_id, user_id) - assert key not in self._revoked_user_certificates - self._revoked_user_certificates[key] = certif - - def get_user(self, local_user, redacted=False): - key = (local_user.organization_id, local_user.user_id) - certif, redacted_certif = self._user_certificates[key] - return redacted_certif if redacted else certif - - def get_device(self, local_device, redacted=False): - key = (local_device.organization_id, local_device.device_id) - certif, redacted_certif = self._device_certificates[key] - return redacted_certif if redacted else certif - - def get_revoked_user(self, local_user): - key = (local_user.organization_id, local_user.user_id) - return self._revoked_user_certificates.get(key) - - def translate_certif(self, needle): - for (_, user_id), (certif, redacted_certif) in self._user_certificates.items(): - if needle == certif: - return f"<{user_id.str} user certif>" - if needle == redacted_certif: - return f"<{user_id.str} redacted user certif>" - - for (_, device_id), (certif, redacted_certif) in self._device_certificates.items(): - if needle == certif: - return f"<{device_id.str} 
device certif>" - if needle == redacted_certif: - return f"<{device_id.str} redacted device certif>" - - for (_, user_id), certif in self._revoked_user_certificates.items(): - if needle == certif: - return f"<{user_id.str} revoked user certif>" - - raise RuntimeError("Unknown certificate !") - - def translate_certifs(self, certifs): - return sorted(self.translate_certif(certif) for certif in certifs) - - -@pytest.fixture -def certificates_store(backend_data_binder_factory, backend): - binder = backend_data_binder_factory(backend) - return binder.certificates_store - - -@pytest.fixture -def backend_data_binder_factory(initial_user_manifest_state): - class BackendDataBinder: - def __init__(self, backend): - self.backend = backend - self.bound_local_devices = [] - self.certificates_store = CertificatesStore() - - def get_device(self, organization_id, device_id): - for d in self.bound_local_devices: - if d.organization_id == organization_id and d.device_id == device_id: - return d - else: - raise ValueError((organization_id, device_id)) - - async def _create_realm_and_first_vlob(self, device): - manifest = initial_user_manifest_state.get_user_manifest_v1_for_backend(device) - if manifest.author == device.device_id: - author = device - else: - author = self.get_device(device.organization_id, manifest.author) - realm_id = author.user_realm_id - vlob_id = author.user_realm_id - - with self.backend.event_bus.listen() as spy: - # The realm needs to be created strictly before the manifest timestamp - realm_create_timestamp = manifest.timestamp.subtract(microseconds=1) - - await self.backend.realm.create( - organization_id=author.organization_id, - self_granted_role=RealmGrantedRole( - realm_id=realm_id, - user_id=author.user_id, - certificate=RealmRoleCertificate( - author=author.device_id, - timestamp=realm_create_timestamp, - realm_id=realm_id, - user_id=author.user_id, - role=RealmRole.OWNER, - ).dump_and_sign(author.signing_key), - role=RealmRole.OWNER, - granted_by=author.device_id, - granted_on=realm_create_timestamp, - ), - ) - vlob_create_fn = partial( - self.backend.vlob.create, - organization_id=author.organization_id, - author=author.device_id, - realm_id=realm_id, - encryption_revision=1, - vlob_id=vlob_id, - timestamp=manifest.timestamp, - blob=manifest.dump_sign_and_encrypt( - author_signkey=author.signing_key, key=author.user_realm_key - ), - ) - try: - await vlob_create_fn(sequester_blob=None) - except VlobSequesterServiceInconsistencyError: - # This won't work if some sequester services are defined, - # but it works fine enough for the moment :) - await vlob_create_fn(sequester_blob={}) - - # Avoid possible race condition in tests listening for events - await spy.wait_multiple( - [ - ( - BackendEventRealmRolesUpdated( - organization_id=author.organization_id, - author=author.device_id, - realm_id=realm_id, - user=author.user_id, - role=RealmRole.OWNER, - ) - ), - ( - BackendEventRealmVlobsUpdated( - organization_id=author.organization_id, - author=author.device_id, - realm_id=realm_id, - checkpoint=1, - src_id=vlob_id, - src_version=1, - ) - ), - ] - ) - - async def bind_organization( - self, - org: OrganizationFullData, - first_device: LocalDevice, - initial_user_manifest: str = "v1", - timestamp: DateTime | None = None, - create_needed: bool = True, - ): - assert initial_user_manifest in ("v1", "not_synced") - - if create_needed: - await self.backend.organization.create( - id=org.organization_id, - bootstrap_token=org.bootstrap_token, - created_on=timestamp or 
first_device.timestamp(), - ) - assert org.organization_id == first_device.organization_id - backend_user, backend_first_device = local_device_to_backend_user( - first_device, org, timestamp - ) - if org.sequester_authority: - sequester_authority = SequesterAuthority( - certificate=org.sequester_authority.certif, - verify_key_der=org.sequester_authority.certif_data.verify_key_der, - ) - else: - sequester_authority = None - await self.backend.organization.bootstrap( - id=org.organization_id, - user=backend_user, - first_device=backend_first_device, - bootstrap_token=org.bootstrap_token, - root_verify_key=org.root_verify_key, - sequester_authority=sequester_authority, - ) - self.certificates_store.store_user( - org.organization_id, - backend_user.user_id, - backend_user.user_certificate, - backend_user.redacted_user_certificate, - ) - self.certificates_store.store_device( - org.organization_id, - backend_first_device.device_id, - backend_first_device.device_certificate, - backend_first_device.redacted_device_certificate, - ) - self.bound_local_devices.append(first_device) - - if initial_user_manifest == "v1": - await self._create_realm_and_first_vlob(first_device) - - async def bind_device( - self, - device: LocalDevice, - certifier: LocalDevice | None = None, - initial_user_manifest: str | None = None, - timestamp: DateTime | None = None, - ): - assert initial_user_manifest in (None, "v1", "not_synced") - - if not certifier: - try: - certifier = next( - d - for d in self.bound_local_devices - if d.organization_id == device.organization_id - ) - except StopIteration: - raise RuntimeError( - f"Organization `{device.organization_id.str}` not bootstrapped" - ) - - backend_user, backend_device = local_device_to_backend_user( - device, certifier, timestamp - ) - - if any(d for d in self.bound_local_devices if d.user_id == device.user_id): - # User already created, only add device - - # For clarity, user manifest state in backend should be only specified - # when creating the user - assert initial_user_manifest is None - - await self.backend.user.create_device(device.organization_id, backend_device) - self.certificates_store.store_device( - device.organization_id, - backend_device.device_id, - backend_device.device_certificate, - backend_device.redacted_device_certificate, - ) - - else: - # Add device and user - await self.backend.user.create_user( - device.organization_id, backend_user, backend_device - ) - self.certificates_store.store_user( - device.organization_id, - backend_user.user_id, - backend_user.user_certificate, - backend_user.redacted_user_certificate, - ) - self.certificates_store.store_device( - device.organization_id, - backend_device.device_id, - backend_device.device_certificate, - backend_device.redacted_device_certificate, - ) - # By default we create user manifest v1 in backend - if initial_user_manifest in (None, "v1"): - await self._create_realm_and_first_vlob(device) - - self.bound_local_devices.append(device) - - async def bind_revocation(self, user_id: UserID, certifier: LocalDevice): - timestamp = certifier.timestamp() - revoked_user_certificate = RevokedUserCertificate( - author=certifier.device_id, timestamp=timestamp, user_id=user_id - ).dump_and_sign(certifier.signing_key) - await self.backend.user.revoke_user( - certifier.organization_id, user_id, revoked_user_certificate, certifier.device_id - ) - self.certificates_store.store_revoked_user( - certifier.organization_id, user_id, revoked_user_certificate - ) - - # Binder must be unique per backend - - binders = [] - 
- def _backend_data_binder_factory(backend): - for binder, candidate_backend in binders: - if candidate_backend is backend: - return binder - - binder = BackendDataBinder(backend) - binders.append((binder, backend)) - return binder - - return _backend_data_binder_factory diff --git a/server/tests/common/event_bus_spy.py b/server/tests/common/event_bus_spy.py index 402f406e1aa..0071f6ff84b 100644 --- a/server/tests/common/event_bus_spy.py +++ b/server/tests/common/event_bus_spy.py @@ -5,13 +5,12 @@ from typing import Type from unittest.mock import ANY +import anyio import attr import pytest -import trio from parsec._parsec import BackendEvent, DateTime from parsec.event_bus import EventBus, MetaEvent -from tests.common import real_clock_timeout class PartialDict(dict): @@ -126,10 +125,6 @@ def _on_event_cb(self, event, **kwargs): def clear(self): self.events.clear() - async def wait_with_timeout(self, event, kwargs=ANY, dt=ANY, update_event_func=None): - async with real_clock_timeout(): - await self.wait(event, kwargs, dt, update_event_func) - async def wait(self, event, kwargs=ANY, dt=ANY, update_event_func=None): if isinstance(event, BackendEvent): assert kwargs is ANY # Ignored value @@ -157,10 +152,6 @@ def _waiter(cooked_event): self._waiters.add(_waiter) return await receive_channel.receive() - async def wait_multiple_with_timeout(self, events, in_order=True): - async with real_clock_timeout(): - await self.wait_multiple(events, in_order=in_order) - async def wait_multiple(self, events, in_order=True): expected_events = self._cook_events_params(events) try: @@ -169,7 +160,7 @@ async def wait_multiple(self, events, in_order=True): except AssertionError: pass - done = trio.Event() + done = anyio.Event() def _waiter(cooked_event): try: diff --git a/server/tests/common/fixtures_customisation.py b/server/tests/common/fixtures_customisation.py deleted file mode 100644 index ecccebd65e4..00000000000 --- a/server/tests/common/fixtures_customisation.py +++ /dev/null @@ -1,70 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -from typing import Any, Callable, TypeVar - -import pytest - -_FIXTURES_CUSTOMIZATIONS = { - "coolorg_is_sequestered_organization", - "alice_profile", - "alice_initial_local_user_manifest", - "alice2_initial_local_user_manifest", - "alice_initial_remote_user_manifest", - "alice_has_human_handle", - "alice_has_device_label", - "bob_profile", - "bob_initial_local_user_manifest", - "bob_initial_remote_user_manifest", - "bob_has_human_handle", - "bob_has_device_label", - "adam_profile", - "adam_initial_local_user_manifest", - "adam_initial_remote_user_manifest", - "adam_has_human_handle", - "adam_has_device_label", - "adam_is_revoked", - "mallory_profile", - "mallory_initial_local_user_manifest", - "mallory_has_human_handle", - "mallory_has_device_label", - "backend_not_populated", - "backend_has_webhook", - "backend_force_mocked", - "backend_over_ssl", - "backend_forward_proto_enforce_https", - "backend_spontaneous_organization_bootstrap", - "blockstore_mode", -} - -F = TypeVar("F") - - -def customize_fixtures(**customizations: Any) -> Callable[[F], F]: - """ - Should be used as a decorator on tests to provide custom settings to fixtures. 
- """ - assert not customizations.keys() - _FIXTURES_CUSTOMIZATIONS - - def wrapper(fn: F) -> F: - try: - getattr(fn, "_fixtures_customization").update(customizations) - except AttributeError: - setattr(fn, "_fixtures_customization", customizations) - return fn - - return wrapper - - -@pytest.fixture -def fixtures_customization(request) -> dict[str, Any]: - try: - return request.node.function._fixtures_customization - except AttributeError: - pass - try: - # In the case of reruns, the original function can be found like so: - function = getattr(request.node.module, request.node.originalname) - return function._fixtures_customization - except AttributeError: - return {} diff --git a/server/tests/common/freeze_time.py b/server/tests/common/freeze_time.py deleted file mode 100644 index 3f4cc3eaa68..00000000000 --- a/server/tests/common/freeze_time.py +++ /dev/null @@ -1,54 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -from contextlib import contextmanager -from typing import Iterator, Optional - -import trio - -from parsec._parsec import DateTime, mock_time -from parsec.api.protocol import DeviceID - -FreezeContext = dict[DeviceID, tuple[Optional[trio.lowlevel.Task], Optional[DateTime]]] -__freeze_time_dict: FreezeContext = {} - - -# Global ID that is used to save previous task & time when freezing datetime. -__global_freeze_time_id = DeviceID.new() - - -@contextmanager -def freeze_time( - time: DateTime | str | None = None, -) -> Iterator[DateTime]: - # Get current time if not provided - if time is None: - time = DateTime.now() - elif isinstance(time, str): - y, m, d = map(int, time.split("-")) - time = DateTime(y, m, d) - - # Get current trio task - try: - current_task = trio.lowlevel.current_task() - except RuntimeError: - current_task = None - - # Save previous context - global __global_freeze_time_id - global __freeze_time_dict - previous_task, previous_time = __freeze_time_dict.get(__global_freeze_time_id, (None, None)) - - # Ensure time has not been frozen from another coroutine - assert previous_task in (None, current_task) - - try: - # Set freeze datetime - __freeze_time_dict[__global_freeze_time_id] = (current_task, time) - mock_time(time) - - yield time - finally: - # Restore previous context - __freeze_time_dict[__global_freeze_time_id] = (previous_task, previous_time) - mock_time(previous_time) diff --git a/server/tests/common/helpers.py b/server/tests/common/helpers.py deleted file mode 100644 index 3b7be193b83..00000000000 --- a/server/tests/common/helpers.py +++ /dev/null @@ -1,133 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS - -from __future__ import annotations - -from inspect import iscoroutinefunction -from unittest.mock import Mock - -import attr -import pytest -import trio - -from parsec._parsec import DateTime -from parsec.api.transport import Transport, TransportError - - -class AsyncMock(Mock): - @property - def is_async(self): - return self.__dict__.get("is_async", False) - - @is_async.setter - def is_async(self, val): - self.__dict__["is_async"] = val - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.__dict__["is_async"] = False - spec = kwargs.get("spec") or kwargs.get("spec_set") - if spec: - if callable(spec): - self.is_async = True - for field in dir(spec): - if iscoroutinefunction(getattr(spec, field)): - getattr(self, field).is_async = True - - async def __async_call(self, *args, **kwargs): - 
return super().__call__(*args, **kwargs) - - def __call__(self, *args, **kwargs): - if getattr(self, "is_async", False) is True: - if iscoroutinefunction(self.side_effect): - return self.side_effect(*args, **kwargs) - - else: - return self.__async_call(*args, **kwargs) - - else: - return super().__call__(*args, **kwargs) - - async def __aenter__(self, *args, **kwargs): - return self - - async def __aexit__(self, *args, **kwargs): - return True - - -class FreezeTestOnTransportError(Transport): - """ - When a server crashes during test, it is possible the client coroutine - receives a `TransportError` exception. Hence we end up with two - exceptions: the server crash (i.e. the original exception we are interested - into) and the client not receiving an answer. - The solution is simply to freeze the coroutine receiving the broken stream - error until it will be cancelled by the original exception bubbling up. - """ - - def __init__(self, transport): - self.transport = transport - - @property - def stream(self): - return self.transport.stream - - async def send(self, msg): - try: - return await self.transport.send(msg) - - except TransportError: - # Wait here until this coroutine is cancelled - await trio.sleep_forever() - - async def recv(self): - try: - return await self.transport.recv() - - except TransportError: - # Wait here until this coroutine is cancelled - await trio.sleep_forever() - - -@attr.s -class CallController: - need_stop = attr.ib(factory=trio.Event) - stopped = attr.ib(factory=trio.Event) - - async def stop(self): - self.need_stop.set() - await self.stopped.wait() - - -async def call_with_control(controlled_fn, *, task_status=trio.TASK_STATUS_IGNORED): - controller = CallController() - - async def _started_cb(**kwargs): - controller.__dict__.update(kwargs) - task_status.started(controller) - await controller.need_stop.wait() - - try: - await controlled_fn(_started_cb) - - finally: - controller.stopped.set() - - -@pytest.fixture -def next_timestamp(): - """On windows, 2 calls to `DateTime.now()` can yield the same value. - For some tests, this creates edges cases we want to avoid. 
- """ - last_timestamp = None - - def _next_timestamp(): - nonlocal last_timestamp - current_timestamp = DateTime.now() - for _ in range(100): - if current_timestamp != last_timestamp: - last_timestamp = current_timestamp - return last_timestamp - else: - raise RuntimeError("Is DateTime.now() frozen ?") - - return _next_timestamp diff --git a/server/tests/common/oracles.py b/server/tests/common/oracles.py deleted file mode 100644 index b352e4ecae4..00000000000 --- a/server/tests/common/oracles.py +++ /dev/null @@ -1,363 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -import shutil -import sys -from pathlib import Path - -import pytest - - -@pytest.fixture -def oracle_fs_factory(tmpdir): - class OracleFS: - def __init__(self, base_path): - self.base = Path(base_path) - self.base.mkdir(parents=True) - self.root = self.base / "root" - self.root.mkdir() - # Root oracle can no longer be removed this way - self.base.chmod(0o500) - if sys.platform == "win32": - self.root.chmod(0o500) - self.entries_stats = {} - self._register_stat(self.root, "root") - - def _is_workspace(self, path): - return len(path.relative_to(self.root).parts) == 1 - - def _cook_path(self, path): - assert path[0] == "/" - return self.root / path[1:] - - def _register_stat(self, path, type): - self.entries_stats[path] = { - "type": type, - "base_version": 0, - "is_placeholder": True, - "need_sync": True, - } - - def dump(self): - content = [] - - def _recursive_dump(path): - for child in path.iterdir(): - entry_name = self._relative_path(child) - if child.is_dir(): - _recursive_dump(child) - entry_name += "/" - stat = self.entries_stats[child] - content.append((entry_name, stat["base_version"], stat["need_sync"])) - - _recursive_dump(self.root) - return sorted(content) - - def copy(self): - new_oracle = _oracle_fs_factory() - for path, stat in self.entries_stats.items(): - new_path = new_oracle.root / path.relative_to(self.root) - new_oracle.entries_stats[new_path] = stat.copy() - # copytree requires the target folder doesn't exist - new_oracle.base.chmod(0o700) - new_oracle.root.rmdir() - shutil.copytree(self.root, new_oracle.root) - new_oracle.base.chmod(0o500) - return new_oracle - - def create_file(self, path): - path = self._cook_path(path) - if self._is_workspace(path): - return "invalid_path" - try: - path.touch(exist_ok=False) - except OSError: - return "invalid_path" - self._register_stat(path, "file") - self.entries_stats[path.parent]["need_sync"] = True - return "ok" - - def create_folder(self, path): - return self._create_folder(path) - - def _create_folder(self, path, workspace=False): - path = self._cook_path(path) - - if workspace: - if not self._is_workspace(path): - return "invalid_path" - else: - if self._is_workspace(path): - return "invalid_path" - - try: - path.mkdir(exist_ok=False) - except OSError: - return "invalid_path" - self._register_stat(path, "folder") - self.entries_stats[path.parent]["need_sync"] = True - return "ok" - - def create_workspace(self, path): - return self._create_folder(path, workspace=True) - - def unlink(self, path): - path = self._cook_path(path) - if self._is_workspace(path): - return "invalid_path" - try: - path.unlink() - except OSError: - return "invalid_path" - del self.entries_stats[path] - self.entries_stats[path.parent]["need_sync"] = True - return "ok" - - def rmdir(self, path): - path = self._cook_path(path) - if self._is_workspace(path): - return "invalid_path" - try: - path.rmdir() - 
except OSError: - return "invalid_path" - self._delete_stats(path) - self.entries_stats[path.parent]["need_sync"] = True - return "ok" - - def delete(self, path): - cooked_path = self._cook_path(path) - if self._is_workspace(cooked_path): - return "invalid_path" - - if cooked_path.is_file(): - return self.unlink(path) - else: - return self.rmdir(path) - - def _delete_stats(self, old_path): - new_stats = {} - for candidate_path, candidate_stat in self.entries_stats.items(): - try: - candidate_path.relative_to(old_path) - except ValueError: - # Candidate is not a child of old_path - new_stats[candidate_path] = candidate_stat - self.entries_stats = new_stats - - def rename_workspace(self, src, dst): - src = self._cook_path(src) - dst = self._cook_path(dst) - - if not self._is_workspace(src) or not self._is_workspace(dst): - return "invalid_path" - - if dst.exists(): - return "invalid_path" - - try: - src.rename(str(dst)) - except OSError: - return "invalid_path" - - if src != dst: - # Rename all the affected entries - for child_src, entry in self.entries_stats.copy().items(): - # Note `child_src` will also contain `src` itself here - try: - relative = child_src.relative_to(src) - except ValueError: - continue - child_dst = dst / relative - self.entries_stats[child_dst] = self.entries_stats.pop(child_src) - - # Remember dst.parent == src.parent == '/' - self.entries_stats[dst.parent]["need_sync"] = True - - return "ok" - - def move(self, src, dst): - # TODO: This method should be called rename - src = self._cook_path(src) - dst = self._cook_path(dst) - - if self._is_workspace(src) or self._is_workspace(dst): - return "invalid_path" - - if src.parent != dst.parent: - return "invalid_path" - - try: - src.rename(str(dst)) - except OSError: - return "invalid_path" - - if src != dst: - # Rename source and all entries within the source - for child_src, entry in self.entries_stats.copy().items(): - try: - relative = child_src.relative_to(src) - except ValueError: - continue - child_dst = dst / relative - entry = self.entries_stats.pop(child_src) - self.entries_stats[child_dst] = entry - - # The parent is the only modified entry - self.entries_stats[src.parent]["need_sync"] = True - - return "ok" - - def sync(self, sync_cb=lambda path, stat: None): - self._recursive_sync(self.root, sync_cb) - return "ok" - - def _relative_path(self, path): - path = str(path.relative_to(self.root)) - return "/" if path == "." 
else f"/{path}" - - def _recursive_sync(self, path, sync_cb): - stat = self.entries_stats[path] - if stat["need_sync"]: - stat["need_sync"] = False - stat["is_placeholder"] = False - stat["base_version"] += 1 - sync_cb(self._relative_path(path), stat) - - if path.is_dir(): - for child in path.iterdir(): - self._recursive_sync(child, sync_cb) - - def stat(self, path): - path = self._cook_path(path) - if path.exists(): - return {"status": "ok", **self.entries_stats[path]} - else: - return {"status": "invalid_path"} - - count = 0 - - def _oracle_fs_factory(): - nonlocal count - count += 1 - return OracleFS(Path(tmpdir / f"oracle_fs-{count}")) - - return _oracle_fs_factory - - -@pytest.fixture -def oracle_fs_with_sync_factory(oracle_fs_factory): - class OracleFSWithSync: - def __init__(self): - self.fs = oracle_fs_factory() - self.fs.sync() - self.synced_fs = oracle_fs_factory() - self.synced_fs.sync() - - def create_file(self, path): - return self.fs.create_file(path) - - def create_folder(self, path): - return self.fs.create_folder(path) - - def create_workspace(self, path): - return self.fs.create_workspace(path) - - def delete(self, path): - return self.fs.delete(path) - - def rmdir(self, path): - return self.fs.rmdir(path) - - def unlink(self, path): - return self.fs.unlink(path) - - def move(self, src, dst): - return self.fs.move(src, dst) - - def rename_workspace(self, src, dst): - return self.fs.rename_workspace(src, dst) - - def flush(self, path): - return self.fs.flush(path) - - def sync(self): - synced_items = [] - - def sync_cb(path, stat): - synced_items.append((path, stat["base_version"], stat["type"])) - - res = self.fs.sync(sync_cb=sync_cb) - if res == "ok": - new_synced = self.fs.copy() - - def _recursive_keep_synced(path): - stat = new_synced.entries_stats[path] - if stat["type"] in ["folder", "workspace"]: - for child in path.iterdir(): - _recursive_keep_synced(child) - stat["need_sync"] = False - if stat["is_placeholder"]: - del new_synced.entries_stats[path] - if stat["type"] == "file": - path.unlink() - else: - path.rmdir() - - _recursive_keep_synced(new_synced.root) - self.synced_fs = new_synced - return res - - def stat(self, path): - return self.fs.stat(path) - - def reset(self): - self.fs = self.synced_fs.copy() - - def _oracle_fs_with_sync_factory(): - return OracleFSWithSync() - - return _oracle_fs_with_sync_factory - - -class FileOracle: - def __init__(self, base_version=0): - self._buffer = bytearray() - self._synced_buffer = bytearray() - self.base_version = base_version - self.need_sync = base_version == 0 - - @property - def size(self): - return len(self._buffer) - - def read(self, size, offset): - return self._buffer[offset : size + offset] - - def write(self, offset, content): - if not content: - return - - if offset > len(self._buffer): - self.truncate(offset + len(content)) - self._buffer[offset : len(content) + offset] = content - self.need_sync = True - - def truncate(self, length): - if length == len(self._buffer): - return - new_buffer = bytearray(length) - truncate_length = min(length, len(self._buffer)) - new_buffer[:truncate_length] = self._buffer[:truncate_length] - self._buffer = new_buffer - self.need_sync = True - - def sync(self): - self._synced_buffer = self._buffer.copy() - if self.need_sync: - self.base_version += 1 - self.need_sync = False - - def reset(self): - self._buffer = self._synced_buffer.copy() - self.need_sync = False diff --git a/server/tests/common/population.py b/server/tests/common/population.py deleted file mode 100644 index 
fb6116deddd..00000000000 --- a/server/tests/common/population.py +++ /dev/null @@ -1,287 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -import re -from collections import defaultdict -from contextlib import asynccontextmanager -from typing import Optional, Union - -import pytest - -from parsec._parsec import BackendOrganizationBootstrapAddr, SigningKey -from parsec.api.protocol import DeviceID, DeviceLabel, HumanHandle, OrganizationID, UserProfile -from tests.common.binder import LocalDevice, OrganizationFullData -from tests.common.freeze_time import freeze_time -from tests.common.sequester import sequester_authority_factory - - -@pytest.fixture -def organization_factory(backend_addr): - organizations = set() - count = 0 - - def _organization_factory(orgname=None, sequestered: bool = False): - nonlocal count - - if not orgname: - count += 1 - orgname = f"Org{count}" - - organization_id = OrganizationID(orgname) - assert organization_id not in organizations - organizations.add(organization_id) - bootstrap_token = f"<{orgname}-bootstrap-token>" - bootstrap_addr = BackendOrganizationBootstrapAddr.build( - backend_addr, organization_id=organization_id, token=bootstrap_token - ) - root_signing_key = SigningKey.generate() - addr = bootstrap_addr.generate_organization_addr(root_signing_key.verify_key) - - if sequestered: - sequester_authority = sequester_authority_factory( - organization_root_signing_key=root_signing_key - ) - else: - sequester_authority = None - - return OrganizationFullData( - bootstrap_addr=bootstrap_addr, - addr=addr, - root_signing_key=root_signing_key, - sequester_authority=sequester_authority, - ) - - return _organization_factory - - -@pytest.fixture -def local_device_factory(coolorg): - devices = defaultdict(list) - count = 0 - - def _local_device_factory( - base_device_id: Optional[Union[str, DeviceID]] = None, - org: OrganizationFullData = coolorg, - profile: Optional[UserProfile] = None, - has_human_handle: bool = True, - base_human_handle: Optional[Union[str, HumanHandle]] = None, - has_device_label: bool = True, - base_device_label: Optional[Union[str, DeviceLabel]] = None, - ): - nonlocal count - - if not base_device_id: - count += 1 - base_device_id = f"user{count}@dev0" - - org_devices = devices[org.organization_id] - if isinstance(base_device_id, DeviceID): - device_id = base_device_id - else: - device_id = DeviceID(base_device_id) - assert not any(d for d in org_devices if d.device_id == device_id) - - if not has_device_label: - assert base_device_label is None - device_label = None - elif not base_device_label: - device_label = DeviceLabel(f"My {device_id.device_name.str} machine") - elif isinstance(base_device_label, DeviceLabel): - device_label = base_device_label - else: - device_label = DeviceLabel(base_device_label) - - if not has_human_handle: - assert base_human_handle is None - human_handle = None - elif not base_human_handle: - name = device_id.user_id.str.capitalize() - human_handle = HumanHandle( - email=f"{device_id.user_id.str}@example.com", label=f"{name}y Mc{name}Face" - ) - elif isinstance(base_human_handle, HumanHandle): - human_handle = base_human_handle - else: - match = re.match(r"(.*) <(.*)>", base_human_handle) - if match: - label, email = match.groups() - else: - label = base_human_handle - email = f"{device_id.user_id.str}@example.com" - human_handle = HumanHandle(email=email, label=label) - - parent_device = None - try: - # If the user already exists, 
we must retrieve it data - parent_device = next(d for d in org_devices if d.user_id == device_id.user_id) - if profile is not None and profile != parent_device.profile: - raise ValueError( - "profile is set but user already exists, with a different profile value." - ) - profile = parent_device.profile - - except StopIteration: - profile = profile or UserProfile.STANDARD - - device = LocalDevice.generate_new_device( - organization_addr=org.addr, - device_id=device_id, - profile=profile, - human_handle=human_handle, - device_label=device_label, - ) - if parent_device is not None: - device.private_key = parent_device.private_key - device.user_realm_id = parent_device.user_realm_id - device.user_realm_key = parent_device.user_realm_key - org_devices.append(device) - return device - - return _local_device_factory - - -@pytest.fixture -def coolorg(fixtures_customization, organization_factory): - # Fonzie approve this - return organization_factory( - "CoolOrg", - sequestered=fixtures_customization.get("coolorg_is_sequestered_organization", False), - ) - - -@pytest.fixture -def other_org(organization_factory): - return organization_factory("OtherOrg") - - -# The organization is expired in backend_authenticated_cmds_factory -@pytest.fixture -def expiredorg(organization_factory): - expired_org = organization_factory("ExpiredOrg") - return expired_org - - -@pytest.fixture -def other_alice(fixtures_customization, local_device_factory, other_org): - return local_device_factory( - "alice@dev1", - other_org, - # other_alice mimics alice - profile=fixtures_customization.get("alice_profile", UserProfile.ADMIN), - has_human_handle=fixtures_customization.get("alice_has_human_handle", True), - has_device_label=fixtures_customization.get("alice_has_device_label", True), - ) - - -@pytest.fixture -def alice(fixtures_customization, local_device_factory, initial_user_manifest_state) -> LocalDevice: - device = local_device_factory( - "alice@dev1", - profile=fixtures_customization.get("alice_profile", UserProfile.ADMIN), - has_human_handle=fixtures_customization.get("alice_has_human_handle", True), - has_device_label=fixtures_customization.get("alice_has_device_label", True), - ) - # Force alice user manifest v1 to be signed by user alice@dev1 - # This is needed given backend_factory bind alice@dev1 then alice@dev2, - # hence user manifest v1 is stored in backend at a time when alice@dev2 - # doesn't exists. - with freeze_time("2000-01-01"): - initial_user_manifest_state.force_user_manifest_v1_generation(device) - return device - - -@pytest.fixture -def expired_org_alice( - fixtures_customization, local_device_factory, initial_user_manifest_state, expiredorg -): - device = local_device_factory( - "alice@dev1", - expiredorg, - # expired_org_alice mimics alice - profile=fixtures_customization.get("alice_profile", UserProfile.ADMIN), - has_human_handle=fixtures_customization.get("alice_has_human_handle", True), - has_device_label=fixtures_customization.get("alice_has_device_label", True), - ) - # Force alice user manifest v1 to be signed by user alice@dev1 - # This is needed given backend_factory bind alice@dev1 then alice@dev2, - # hence user manifest v1 is stored in backend at a time when alice@dev2 - # doesn't exists. 
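
The pinned `2000-01-01` timestamp below relies on the `freeze_time` helper removed earlier in this diff; a usage sketch, assuming `mock_time` makes `DateTime.now()` return the frozen value:

```python
from parsec._parsec import DateTime
from tests.common.freeze_time import freeze_time  # module removed by this diff

with freeze_time("2000-01-01") as now:
    assert now == DateTime(2000, 1, 1)
    assert DateTime.now() == now  # assumes mock_time patches DateTime.now()
```
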
- with freeze_time("2000-01-01"): - initial_user_manifest_state.force_user_manifest_v1_generation(device) - return device - - -@pytest.fixture -def alice2(fixtures_customization, local_device_factory): - return local_device_factory( - "alice@dev2", - profile=fixtures_customization.get("alice_profile", UserProfile.ADMIN), - has_human_handle=fixtures_customization.get("alice_has_human_handle", True), - has_device_label=fixtures_customization.get("alice_has_device_label", True), - ) - - -@pytest.fixture -def adam(fixtures_customization, local_device_factory): - return local_device_factory( - "adam@dev1", - profile=fixtures_customization.get("adam_profile", UserProfile.ADMIN), - has_human_handle=fixtures_customization.get("adam_has_human_handle", True), - has_device_label=fixtures_customization.get("adam_has_device_label", True), - ) - - -@pytest.fixture -def bob(fixtures_customization, local_device_factory): - return local_device_factory( - "bob@dev1", - profile=fixtures_customization.get("bob_profile", UserProfile.STANDARD), - has_human_handle=fixtures_customization.get("bob_has_human_handle", True), - has_device_label=fixtures_customization.get("bob_has_device_label", True), - ) - - -@pytest.fixture -def mallory(fixtures_customization, local_device_factory): - return local_device_factory( - "mallory@dev1", - profile=fixtures_customization.get("mallory_profile", UserProfile.STANDARD), - has_human_handle=fixtures_customization.get("mallory_has_human_handle", True), - has_device_label=fixtures_customization.get("mallory_has_device_label", True), - ) - - -@pytest.fixture -def ws_from_other_organization_factory( - backend_authenticated_ws_factory, - backend_data_binder_factory, - organization_factory, - local_device_factory, -): - @asynccontextmanager - async def _ws_from_other_organization_factory( - backend_asgi_app, - mimic: Optional[str] = None, - anonymous: bool = False, - profile: UserProfile = UserProfile.STANDARD, - ): - binder = backend_data_binder_factory(backend_asgi_app.backend) - - other_org = organization_factory() - if mimic: - other_device = local_device_factory( - base_device_id=mimic, org=other_org, profile=profile - ) - else: - other_device = local_device_factory(org=other_org, profile=profile) - await binder.bind_organization(other_org, other_device) - - if anonymous: - auth_as = other_org.organization_id - else: - auth_as = other_device - async with backend_authenticated_ws_factory(backend_asgi_app, auth_as) as sock: - sock.device = other_device - yield sock - - return _ws_from_other_organization_factory diff --git a/server/tests/common/postgresql.py b/server/tests/common/postgresql.py index 7342c4b35d3..921f9caeb6d 100644 --- a/server/tests/common/postgresql.py +++ b/server/tests/common/postgresql.py @@ -4,14 +4,15 @@ import asyncio import atexit import os +from typing import Awaitable, Callable import asyncpg from asyncpg.cluster import TempCluster -from parsec.backend.postgresql.handler import _apply_migrations, retrieve_migrations +from parsec.components.postgresql.handler import _apply_migrations, retrieve_migrations -def _patch_url_if_xdist(url): +def _patch_url_if_xdist(url: str) -> str: xdist_worker = os.environ.get("PYTEST_XDIST_WORKER") if xdist_worker: return f"{url}_{xdist_worker}" @@ -22,14 +23,16 @@ def _patch_url_if_xdist(url): _pg_db_url = None -async def run_migrations(conn) -> None: +async def run_migrations(conn: asyncpg.Connection) -> None: result = await _apply_migrations(conn, retrieve_migrations(), dry_run=False) if result.error: migration, msg = 
result.error raise RuntimeError(f"Error while applying migration {migration.file_name}: {msg}") -async def _execute_pg_query(url, query): +async def _execute_pg_query( + url: str, query: str | Callable[[asyncpg.Connection], Awaitable[None]] +) -> None: conn = await asyncpg.connect(url) if callable(query): await query(conn) @@ -38,7 +41,7 @@ async def _execute_pg_query(url, query): await conn.close() -def bootstrap_postgresql_testbed(): +def bootstrap_postgresql_testbed() -> str: global _pg_db_url provided_db = os.environ.get("PG_URL") @@ -63,7 +66,8 @@ def bootstrap_postgresql_testbed(): return _pg_db_url -async def asyncio_reset_postgresql_testbed(): +async def asyncio_reset_postgresql_testbed() -> None: + assert _pg_db_url is not None await _execute_pg_query( _pg_db_url, """ @@ -90,18 +94,18 @@ async def asyncio_reset_postgresql_testbed(): ) -def reset_postgresql_testbed(): +def reset_postgresql_testbed() -> None: asyncio.run(asyncio_reset_postgresql_testbed()) -def get_postgresql_url(): +def get_postgresql_url() -> str | None: return _pg_db_url _pg_cluster = None -def bootstrap_pg_cluster(): +def bootstrap_pg_cluster() -> str: global _pg_cluster if _pg_cluster: @@ -114,6 +118,7 @@ def bootstrap_pg_cluster(): _pg_cluster.start(port="dynamic", server_settings={}) def _shutdown_pg_cluster(): + assert _pg_cluster is not None if _pg_cluster.get_status() == "running": _pg_cluster.stop() if _pg_cluster.get_status() != "not-initialized": diff --git a/server/tests/common/rpc_api.py b/server/tests/common/rpc_api.py deleted file mode 100644 index 328584baf5c..00000000000 --- a/server/tests/common/rpc_api.py +++ /dev/null @@ -1,344 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -from base64 import b64decode, b64encode -from contextlib import asynccontextmanager -from dataclasses import dataclass -from typing import AsyncIterator, Callable, Dict, Optional - -import msgpack -import pytest -import trio -from quart import Quart -from quart.typing import TestClientProtocol, TestHTTPConnectionProtocol -from werkzeug.datastructures import Headers - -from parsec._parsec import ( - ApiVersion, - BackendInvitationAddr, - DateTime, - InvitationToken, - OrganizationID, - authenticated_cmds, -) -from tests.common import LocalDevice, OrganizationFullData - - -class BaseRpcApiClient: - async def send( - self, - req, - extra_headers: Dict[str, str] = {}, - before_send_hook: Optional[Callable] = None, - now: Optional[DateTime] = None, - check_rep: bool = True, - ): - raise NotImplementedError - - -@dataclass -class SSEEventSink: - connection: TestHTTPConnectionProtocol - conn_buff: bytes = b"" - - @property - def status_code(self) -> int: - # Status code only available after the first event has been received - assert self.connection.status_code is not None - return self.connection.status_code - - async def get_next_event( - self, raw: bool = False - ) -> bytes | authenticated_cmds.latest.events_listen.Rep: - _, event = await self.get_next_event_and_id(raw) - return event - - async def get_next_event_and_id( - self, raw: bool = False - ) -> tuple[str, bytes | authenticated_cmds.latest.events_listen.Rep]: - while True: - # Get a message - while True: - try: - # Did we got multiple messages last time we read the connection ? 
- msg, self.conn_buff = self.conn_buff.split(b"\n\n", 1) - break - except ValueError: - self.conn_buff += await self.connection.receive() - - if msg == b":keepalive": - continue - - if msg == b"event:missed_events": - raise RuntimeError("missed events !") - - data_line, id_line = msg.split(b"\n") - assert data_line.startswith(b"data:") - # Strip because, according to SSE spec, `data:test` and `data: test` are identical - data = data_line[len(b"data:") :].strip() - assert id_line.startswith(b"id:") - id = id_line[len(b"id:") :].strip().decode("ascii") - - raw_event = b64decode(data) - if raw: - return (id, raw_event) - else: - return (id, authenticated_cmds.latest.events_listen.Rep.load(raw_event)) - - -class AuthenticatedRpcApiClient(BaseRpcApiClient): - API_VERSION = ApiVersion.API_LATEST_VERSION - - def __init__(self, client: TestClientProtocol, device: LocalDevice): - self.client = client - self.device = device - - @property - def base_headers(self): - return Headers( - { - "Content-Type": "application/msgpack", - "Api-Version": str(self.API_VERSION), - "Authorization": "PARSEC-SIGN-ED25519", - "Author": b64encode(self.device.device_id.str.encode("utf8")), - } - ) - - @asynccontextmanager - async def connect_sse_events( - self, before_send_hook: Callable | None = None, last_event_id: str | None = None - ) -> AsyncIterator[SSEEventSink]: - headers = self.base_headers.copy() - signature = self.device.signing_key.sign_only_signature(b"") - headers["Signature"] = b64encode(signature).decode("ascii") - headers["Accept"] = "text/event-stream" - if last_event_id: - headers["Last-Event-ID"] = last_event_id - args = { - "method": "GET", - "path": f"/authenticated/{self.device.organization_id.str}/events", - "headers": headers, - } - # Last chance to customize the request ! - if before_send_hook: - # Passing as dict allow the hook to event modify the path param - before_send_hook(args) - - connection = self.client.request(**args) - # TODO: `connection` async context manager is broken in `quart_trio` - async with trio.open_nursery() as nursery: - nursery.start_soon( - connection.app, connection.scope, connection._asgi_receive, connection._asgi_send - ) - - await connection.send_complete() - first_data = await connection.receive() - if connection.status_code == 200: - assert first_data == b":keepalive\n\n" - - yield SSEEventSink(connection) - - nursery.cancel_scope.cancel() - - async def send( - self, - req: bytes | dict, - extra_headers: Dict[str, str] = {}, - before_send_hook: Optional[Callable] = None, - now: Optional[DateTime] = None, - check_rep: bool = True, - ): - now = now or DateTime.now() - if isinstance(req, bytes): - body = req - else: - body = msgpack.packb(req) - headers = self.base_headers.copy() - signature = self.device.signing_key.sign_only_signature(body) - headers["Signature"] = b64encode(signature).decode("ascii") - - # Customize headers - for k, v in extra_headers.items(): - if v is None: - headers.pop(k, None) - else: - headers[k] = v - - args = { - "path": f"/authenticated/{self.device.organization_id.str}", - "headers": headers, - "data": body, - } - # Last chance to customize the request ! 
- if before_send_hook: - # Passing as dict allow the hook to event modify the path param - before_send_hook(args) - rep = await self.client.post(**args) - - if check_rep: - assert rep.status_code == 200 - rep_body = await rep.get_data() - if isinstance(req, bytes): - return rep_body - else: - return msgpack.unpackb(rep_body) - - else: - return rep - - -class AnonymousRpcApiClient(BaseRpcApiClient): - API_VERSION = ApiVersion.API_LATEST_VERSION - - def __init__(self, organization_id: OrganizationID, client: TestClientProtocol): - self.organization_id = organization_id - self.client = client - - @property - def base_headers(self): - return Headers( - { - "Content-Type": "application/msgpack", - "Api-Version": str(self.API_VERSION), - } - ) - - async def send( - self, - req: bytes | dict, - extra_headers: Dict[str, str] = {}, - before_send_hook: Optional[Callable] = None, - now: Optional[DateTime] = None, - check_rep: bool = True, - ): - now = now or DateTime.now() - if isinstance(req, bytes): - body = req - else: - body = msgpack.packb(req) - headers = self.base_headers.copy() - - # Customize headers - for k, v in extra_headers.items(): - if v is None: - headers.pop(k, None) - else: - headers[k] = v - - args = {"path": f"/anonymous/{self.organization_id.str}", "headers": headers, "data": body} - # Last chance to customize the request ! - if before_send_hook: - # Passing as dict allow the hook to event modify the path param - before_send_hook(args) - rep = await self.client.post(**args) - - if check_rep: - assert rep.status_code == 200 - rep_body = await rep.get_data() - if isinstance(req, bytes): - return rep_body - else: - return msgpack.unpackb(rep_body) - - else: - return rep - - -class InvitedRpcApiClient(BaseRpcApiClient): - API_VERSION = ApiVersion.API_LATEST_VERSION - - def __init__( - self, - organization_id: OrganizationID, - client: TestClientProtocol, - invitation_token: InvitationToken, - ): - self.organization_id = organization_id - self.client = client - self.invitation_token = invitation_token - - @property - def base_headers(self): - return Headers( - { - "Content-Type": "application/msgpack", - "Api-Version": str(self.API_VERSION), - "Invitation-Token": self.invitation_token.hex, - } - ) - - async def send( - self, - req: bytes | dict, - extra_headers: Dict[str, str] = {}, - before_send_hook: Optional[Callable] = None, - now: Optional[DateTime] = None, - check_rep: bool = True, - ): - now = now or DateTime.now() - if isinstance(req, bytes): - body = req - else: - body = msgpack.packb(req) - headers = self.base_headers.copy() - - # Customize headers - for k, v in extra_headers.items(): - if v is None: - headers.pop(k, None) - else: - headers[k] = v - - args = {"path": f"/invited/{self.organization_id.str}", "headers": headers, "data": body} - # Last chance to customize the request ! 
- if before_send_hook: - # Passing as dict allow the hook to event modify the path param - before_send_hook(args) - rep = await self.client.post(**args) - - if check_rep: - assert rep.status_code == 200 - rep_body = await rep.get_data() - if isinstance(req, bytes): - return rep_body - else: - return msgpack.unpackb(rep_body) - - else: - return rep - - -@pytest.fixture -def alice_rpc(alice: LocalDevice, backend_asgi_app: Quart) -> AuthenticatedRpcApiClient: - test_client = backend_asgi_app.test_client() - return AuthenticatedRpcApiClient(test_client, alice) - - -@pytest.fixture -def alice2_rpc(alice2: LocalDevice, backend_asgi_app: Quart) -> AuthenticatedRpcApiClient: - test_client = backend_asgi_app.test_client() - return AuthenticatedRpcApiClient(test_client, alice2) - - -@pytest.fixture -def bob_rpc(bob: LocalDevice, backend_asgi_app: Quart) -> AuthenticatedRpcApiClient: - test_client = backend_asgi_app.test_client() - return AuthenticatedRpcApiClient(test_client, bob) - - -@pytest.fixture -def anonymous_rpc(coolorg: OrganizationFullData, backend_asgi_app: Quart) -> AnonymousRpcApiClient: - test_client = backend_asgi_app.test_client() - return AnonymousRpcApiClient(coolorg.organization_id, test_client) - - -@pytest.fixture -def invited_rpc( - coolorg: OrganizationFullData, - backend_asgi_app: Quart, - alice_new_device_invitation: BackendInvitationAddr, -) -> InvitedRpcApiClient: - test_client = backend_asgi_app.test_client() - return InvitedRpcApiClient( - coolorg.organization_id, test_client, alice_new_device_invitation.token - ) diff --git a/server/tests/common/sequester.py b/server/tests/common/sequester.py deleted file mode 100644 index b5379cdcdc9..00000000000 --- a/server/tests/common/sequester.py +++ /dev/null @@ -1,105 +0,0 @@ -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -from dataclasses import dataclass - -from parsec._parsec import ( - DateTime, - SequesterAuthorityCertificate, - SequesterPrivateKeyDer, - SequesterPublicKeyDer, - SequesterServiceCertificate, - SequesterServiceID, - SequesterSigningKeyDer, - SequesterVerifyKeyDer, - SigningKey, -) -from parsec.backend.sequester import ( - BaseSequesterService, - SequesterServiceType, - StorageSequesterService, - WebhookSequesterService, -) - - -@dataclass -class SequesterAuthorityFullData: - certif: bytes - certif_data: SequesterAuthorityCertificate - signing_key: SequesterSigningKeyDer - verify_key: SequesterVerifyKeyDer - - -def sequester_authority_factory( - organization_root_signing_key: SigningKey, timestamp: DateTime | None = None -) -> SequesterAuthorityFullData: - timestamp = timestamp or DateTime.now() - # Don't use such a small key size in real world, this is only for test ! 
diff --git a/server/tests/common/sequester.py b/server/tests/common/sequester.py
deleted file mode 100644
index b5379cdcdc9..00000000000
--- a/server/tests/common/sequester.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS
-from __future__ import annotations
-
-from dataclasses import dataclass
-
-from parsec._parsec import (
-    DateTime,
-    SequesterAuthorityCertificate,
-    SequesterPrivateKeyDer,
-    SequesterPublicKeyDer,
-    SequesterServiceCertificate,
-    SequesterServiceID,
-    SequesterSigningKeyDer,
-    SequesterVerifyKeyDer,
-    SigningKey,
-)
-from parsec.backend.sequester import (
-    BaseSequesterService,
-    SequesterServiceType,
-    StorageSequesterService,
-    WebhookSequesterService,
-)
-
-
-@dataclass
-class SequesterAuthorityFullData:
-    certif: bytes
-    certif_data: SequesterAuthorityCertificate
-    signing_key: SequesterSigningKeyDer
-    verify_key: SequesterVerifyKeyDer
-
-
-def sequester_authority_factory(
-    organization_root_signing_key: SigningKey, timestamp: DateTime | None = None
-) -> SequesterAuthorityFullData:
-    timestamp = timestamp or DateTime.now()
-    # Don't use such a small key size in the real world, this is only for tests !
-    # (RSA key generation gets ~10x slower between 1024 and 4096)
-    signing_key, verify_key = SequesterSigningKeyDer.generate_pair(1024)
-    certif = SequesterAuthorityCertificate(
-        timestamp=timestamp,
-        verify_key_der=verify_key,
-    )
-    return SequesterAuthorityFullData(
-        certif=certif.dump_and_sign(organization_root_signing_key),
-        certif_data=certif,
-        signing_key=signing_key,
-        verify_key=verify_key,
-    )
-
-
-@dataclass
-class SequesterServiceFullData:
-    certif: bytes
-    certif_data: SequesterServiceCertificate
-    decryption_key: SequesterPrivateKeyDer
-    encryption_key: SequesterPublicKeyDer
-    backend_service: BaseSequesterService
-
-    @property
-    def service_id(self) -> SequesterServiceID:
-        return self.certif_data.service_id
-
-
-def sequester_service_factory(
-    label: str,
-    authority: SequesterAuthorityFullData,
-    timestamp: DateTime | None = None,
-    service_type: SequesterServiceType = SequesterServiceType.STORAGE,
-    webhook_url: str | None = None,
-) -> SequesterServiceFullData:
-    timestamp = timestamp or DateTime.now()
-    # Don't use such a small key size in the real world, this is only for tests !
-    # (RSA key generation gets ~10x slower between 1024 and 4096)
-    decryption_key, encryption_key = SequesterPrivateKeyDer.generate_pair(1024)
-    certif_data = SequesterServiceCertificate(
-        service_id=SequesterServiceID.new(),
-        timestamp=timestamp,
-        service_label=label,
-        encryption_key_der=encryption_key,
-    )
-    certif = authority.signing_key.sign(certif_data.dump())
-    if service_type == SequesterServiceType.STORAGE:
-        assert webhook_url is None
-        backend_service = StorageSequesterService(
-            service_id=certif_data.service_id,
-            service_label=certif_data.service_label,
-            service_certificate=certif,
-        )
-    else:
-        assert service_type == SequesterServiceType.WEBHOOK
-        assert webhook_url is not None
-        backend_service = WebhookSequesterService(
-            service_id=certif_data.service_id,
-            service_label=certif_data.service_label,
-            service_certificate=certif,
-            webhook_url=webhook_url,
-        )
-    return SequesterServiceFullData(
-        certif=certif,
-        certif_data=certif_data,
-        decryption_key=decryption_key,
-        encryption_key=encryption_key,
-        backend_service=backend_service,
-    )
diff --git a/server/tests/common/trio_clock.py b/server/tests/common/trio_clock.py
deleted file mode 100644
index dde8e54689e..00000000000
--- a/server/tests/common/trio_clock.py
+++ /dev/null
@@ -1,164 +0,0 @@
-# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS
-from __future__ import annotations
-
-import math
-import time
-from contextlib import asynccontextmanager
-
-import pytest
-import trio
-from trio.testing import MockClock
-
-# In tests we often want to wait on something that in theory would be
-# instantaneously available but in fact depends on side effects.
-# Examples of side effects:
-# - How fast & currently loaded is the CPU (especially when running on the CI)
-# - Network socket
-# - PostgreSQL database
-# We have to decide how long we want to wait on those things, considering:
-# - the shorter we wait, the more convenient it is when developing
-# - the longer we wait, the more we avoid false positives when side effects are unexpectedly long
-# - if we wait forever we get rid of the false positives, but we also hang forever
-#   in case a mistake in the code leads to a deadlock :(
-# So the solution is to make this configurable: a good middle ground by default
-# and a long long time on the CI.
-_SIDE_EFFECTS_TIMEOUT = 3
-
-
-def get_side_effects_timeout() -> float:
-    return _SIDE_EFFECTS_TIMEOUT
-
-
-def _set_side_effects_timeout(timeout: float) -> None:
-    global _SIDE_EFFECTS_TIMEOUT
-    _SIDE_EFFECTS_TIMEOUT = timeout
-
-
-@asynccontextmanager
-async def real_clock_timeout():
-    # In tests we use a mock clock to make parsec code faster by not staying idle,
-    # however we might also want to ensure some test code doesn't take too long.
-    # Hence `trio.fail_after` in test code doesn't play nice with the mock clock
-    # (especially given the CI can be unpredictably slow)...
-    # The solution is to have our own fail_after that uses the real monotonic clock.
-
-    # The timeout is not configurable by design, to avoid letting the user think
-    # this parameter can be used to make the mocked trio clock advance
-    timeout = get_side_effects_timeout()
-
-    # Starting a thread can be very slow (looking at you, Windows) so better
-    # take the starting time here
-    start = time.monotonic()
-    event_occurred = False
-    async with trio.open_nursery() as nursery:
-
-        def _run_until_timeout_or_event_occurred():
-            while not event_occurred and time.monotonic() - start < timeout:
-                # cancelling the `_watchdog` coroutine doesn't stop the thread,
-                # so we only sleep for a short amount of time in order to
-                # detect early enough that we are no longer needed
-                time.sleep(0.01)
-
-        async def _watchdog():
-            await trio.to_thread.run_sync(_run_until_timeout_or_event_occurred)
-            if not event_occurred:
-                raise trio.TooSlowError()
-
-        # Note: We could have started the thread directly instead of using
-        # trio's thread support.
-        # This would allow us to use a non-async contextmanager to better mimic
-        # `trio.fail_after`, however this would prevent us from using trio's
-        # threadpool system, which is good given it allows us to reuse the thread
-        # and hence avoid most of its cost
-        nursery.start_soon(_watchdog)
-        try:
-            yield
-        finally:
-            event_occurred = True
-            nursery.cancel_scope.cancel()
-
-
-@pytest.fixture
-def mock_clock():
-    # Prevents usage of pytest_trio's `mock_clock` fixture.
-    raise RuntimeError("Use `frozen_clock` fixture instead !!!")
-
-
-@pytest.fixture
-def autojump_clock():
-    # Prevents usage of pytest_trio's `autojump` fixture.
-    raise RuntimeError("Use `frozen_clock` fixture instead !!!")
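To make the guard concrete, here is a minimal sketch of the intended consumption pattern (the `running_backend` fixture and its `on_ping` wiring are hypothetical; only `real_clock_timeout` comes from the helper above):

    async def test_backend_sends_ping_event(running_backend):
        event = trio.Event()
        running_backend.on_ping = event.set  # hypothetical wiring

        # Bounded by the real monotonic clock, so a frozen/mocked trio
        # clock cannot turn a deadlock into an infinite hang
        async with real_clock_timeout():
            await event.wait()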
-@pytest.fixture
-def frozen_clock():
-    # A mocked clock is a slippery slope: we want time to go faster (or even to
-    # jump to an arbitrary point in time !) in some parts of our application while
-    # some other parts should keep using the real time.
-    # For instance we want to make sure some part of a test doesn't take more than
-    # x seconds in real life (typically to detect deadlocks), but this test might
-    # be about a ping occurring every 30s, so we want to simulate this wait.
-    #
-    # The simple solution is to use `MockClock.rate` to make time go faster,
-    # but it's a bad idea given we end up with two antagonistic goals:
-    # - the rate should be as high as possible so that the ping wait goes as fast as possible
-    # - the higher the rate is, the smaller the real time window we have when checking for
-    #   a deadlock; this is especially an issue given the developer machine is a behemoth
-    #   while the CI runs on potatoes (especially on macOS) shared with other builds...
-    #
-    # So the solution we choose here is to separate the two times:
-    # - The Parsec codebase uses the trio clock and `trio.fail_after/move_on_after`
-    # - Test code can use `trio.fail_after/move_on_after` as long as the test
-    #   doesn't use a mock clock
-    # - In case of a mock clock, test code must use `real_clock_timeout`, which
-    #   relies on the monotonic clock and hence is totally isolated from trio's clock.
-    #
-    # On top of that we must be careful about the configuration of the mock clock !
-    # As we said, the Parsec codebase (i.e. not the tests) uses the trio clock for
-    # timeout handling & sleep (e.g. in the managers), hence:
-    # - Using `MockClock.rate` with a high value still leads to the issue discussed above.
-    # - `trio.to_thread.run_sync` doesn't play nice with `MockClock.autojump_threshold = 0`
-    #   given trio considers the coroutine waiting for the thread is idle and hence
-    #   triggers the clock jump. So perfectly fine async code may break tests in
-    #   an unexpected way if it starts using `trio.to_thread.run_sync`...
-    #
-    # So the idea of the `frozen_clock` is to only advance when explicitly
-    # specified in the test (i.e. rate 0 and no autojump_threshold).
-    # This way only the test code has control over the application timeout
-    # handling, and we have a clean separation with the test timeout (i.e. using
-    # `real_clock_timeout` to detect when the test ends up in a deadlock)
-    #
-    # The drawback of this approach is that manually handling time jumps can be cumbersome.
-    # For instance the backend connection retry logic:
-    # - sleeps for some time
-    # - connects to the backend
-    # - starts sync&message monitors
-    # - message monitor may trigger modifications in the sync monitor
-    # - in case of modification, sync monitor is going to sleep for a short time
-    #   before doing the sync of the modification
-    #
-    # So to avoid having to mix `MockClock.jump` and `trio.testing.wait_all_tasks_blocked`
-    # in a very complex and fragile way, we introduce the `sleep_with_autojump()`
-    # method that is the only place where the clock is going to move behind our back, but
-    # only for the amount of time we choose, and only in a very explicit manner.
-    #
-    # Finally, an additional bonus to this approach is that we can use breakpoints in the
-    # code without worrying about triggering a timeout ;-)
-
-    clock = MockClock(rate=0, autojump_threshold=math.inf)
-
-    clock.real_clock_timeout = real_clock_timeout  # Quick access helper
-
-    async def _sleep_with_autojump(seconds):
-        old_rate = clock.rate
-        old_autojump_threshold = clock.autojump_threshold
-        clock.rate = 0
-        clock.autojump_threshold = 0.01
-        try:
-            await trio.sleep(seconds)
-        finally:
-            clock.rate = old_rate
-            clock.autojump_threshold = old_autojump_threshold
-
-    clock.sleep_with_autojump = _sleep_with_autojump
-    yield clock
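The resulting test idiom, sketched with hypothetical fixture and helper names (only `frozen_clock`, `sleep_with_autojump` and `real_clock_timeout` are defined above):

    async def test_ping_sent_every_30s(frozen_clock, alice_core):
        # Explicitly let the mocked clock cover the 30s wait between
        # pings; nothing else can make it advance behind our back
        await frozen_clock.sleep_with_autojump(30)

        # Meanwhile the deadlock guard runs on the real monotonic clock
        async with real_clock_timeout():
            await alice_core.wait_ping_acknowledged()  # hypothetical helper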
diff --git a/server/tests/conftest.py b/server/tests/conftest.py
index cbecf63946f..72718998508 100644
--- a/server/tests/conftest.py
+++ b/server/tests/conftest.py
@@ -8,26 +8,19 @@
 import hypothesis
 import pytest
 import structlog
-import trio
-import trio_asyncio

-from parsec.backend.config import (
+from parsec.config import (
+    BaseBlockStoreConfig,
     MockedBlockStoreConfig,
     PostgreSQLBlockStoreConfig,
-    RAID0BlockStoreConfig,
-    RAID1BlockStoreConfig,
-    RAID5BlockStoreConfig,
 )
-from parsec.monitoring import TaskMonitoringInstrument

-# TODO: needed ?
 # Must be done before the module has any chance to be imported
 pytest.register_assert_rewrite("tests.common.event_bus_spy")

 from tests.common import (
     asyncio_reset_postgresql_testbed,
     bootstrap_postgresql_testbed,
     get_postgresql_url,
-    get_side_effects_timeout,
     reset_postgresql_testbed,
 )

@@ -35,7 +28,6 @@
 def pytest_addoption(parser: pytest.Parser):
-    parser.addoption("--side-effects-timeout", default=get_side_effects_timeout(), type=float)
     parser.addoption("--hypothesis-max-examples", default=100, type=int)
     parser.addoption("--hypothesis-derandomize", action="store_true")
     parser.addoption(
@@ -103,13 +95,6 @@
         pytest.exit("bye")
     elif config.getoption("--postgresql") and not _is_xdist_master(config):
         bootstrap_postgresql_testbed()
-    # Configure custom side effects timeout
-    if config.getoption("--side-effects-timeout"):
-        import tests.common.trio_clock
-
-        tests.common.trio_clock._set_side_effects_timeout(
-            float(config.getoption("--side-effects-timeout"))
-        )


 def _is_xdist_master(config):
@@ -207,47 +192,13 @@
 def no_logs_gte_error(caplog):
     yield

-    # TODO: Concurrency bug in Hypercorn when the server is torn down while a
-    # client websocket is currently disconnecting
-    # see: https://github.com/Scille/parsec-cloud/issues/2716
-    def skip_hypercorn_buggy_log(record):
-        try:
-            _, exc, _ = record.exc_info
-        except (ValueError, TypeError):
-            exc = None
-
-        if record.name == "asyncio" and isinstance(exc, ConnectionError):
-            return True
-
-        if record.name != "hypercorn.error":
-            return True
-
-        if record.exc_text.endswith(
-            "wsproto.utilities.LocalProtocolError: Connection cannot be closed in state ConnectionState.CLOSED"
-        ):
-            return False
-
-        if record.exc_text.endswith(
-            "trio.BusyResourceError: another task is currently sending data on this SocketStream"
-        ):
-            return False
-
-        if record.exc_text.endswith(
-            "wsproto.utilities.LocalProtocolError: Event CloseConnection(code=1000, reason=None) cannot be sent in state ConnectionState.CLOSED."
-        ):
-            return False
-
-        return True
-
     # The test should use `caplog.assert_occurred_once` to indicate a log was expected,
     # otherwise we consider error logs as *actual* errors.
     asserted_records = getattr(caplog, "asserted_records", set())
     errors = [
         record
         for record in caplog.get_records("call")
-        if record.levelno >= logging.ERROR
-        and record not in asserted_records
-        and skip_hypercorn_buggy_log(record)
+        if record.levelno >= logging.ERROR and record not in asserted_records
     ]
     assert not errors

@@ -265,24 +216,6 @@ def hypothesis_settings(request):


 # Other main fixtures

-@pytest.fixture
-async def nursery():
-    # A word about the nursery fixture:
-    # The whole point of trio is to be able to build a graph of coroutines to
-    # simplify teardown. Using a single top level nursery kind of mitigates this,
-    # given unrelated coroutines will end up there and be closed all together.
-    # Worse, among those coroutines there could exist a relationship that will be lost
-    # in a more or less subtle way (typically when using a factory fixture that uses the
-    # default nursery behind the scenes).
-    # Bonus points occur if using trio-asyncio, which creates yet another hidden
-    # layer of relationships that could end up in a cryptic deadlock hardened enough
-    # to survive ^C.
-    # Finally, if you're still not convinced: factory fixtures not depending on async
-    # fixtures (like nursery is) can be used inside Hypothesis tests.
-    # I know you love Hypothesis. Checkmate. You won't use this fixture ;-)
-    raise RuntimeError("Bad kitty ! Bad !!!")
-
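The sanctioned replacement is simply to scope the nursery to the code that needs it. A trivial, self-contained sketch:

    import trio

    async def my_background_task():  # stand-in for any long-running coroutine
        await trio.sleep_forever()

    async def test_with_scoped_nursery():
        async with trio.open_nursery() as nursery:
            nursery.start_soon(my_background_task)
            nursery.cancel_scope.cancel()  # or let the tasks finish naturally
        # Past this point the whole coroutine graph is guaranteed gone,
        # with the parent/child relationships preserved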
 @pytest.fixture
 def postgresql_url(request):
     if not request.node.get_closest_marker("postgresql"):
@@ -293,37 +226,12 @@

 @pytest.fixture
-async def asyncio_loop(request):
-    # The asyncio loop is only needed for triopg
-    if not request.config.getoption("--postgresql"):
-        yield None
-
-    else:
-        # When a ^C happens, trio sends a Cancelled exception to each running
-        # coroutine. We must protect this one to avoid a deadlock if it is cancelled
-        # before another coroutine that uses trio-asyncio.
-        with trio.CancelScope(shield=True):
-            async with trio_asyncio.open_loop() as loop:
-                yield loop
-
-
-@pytest.fixture
-async def task_monitoring():
-    trio.lowlevel.add_instrument(TaskMonitoringInstrument())
-
-
-@pytest.fixture(scope="session")
-def monitor():
-    from tests.monitor import Monitor
-
-    return Monitor()
-
-
-@pytest.fixture()
-def backend_store(request):
+def db_url(request) -> str:
     if request.config.getoption("--postgresql"):
         reset_postgresql_testbed()
-        return get_postgresql_url()
+        url = get_postgresql_url()
+        assert url is not None
+        return url

     elif request.node.get_closest_marker("postgresql"):
         pytest.skip("Test is postgresql-only")

@@ -333,35 +241,12 @@

 @pytest.fixture
-def blockstore(backend_store, fixtures_customization):
+def blockstore_config(db_url: str) -> BaseBlockStoreConfig:
     # TODO: allow to test against swift ?
-    if backend_store.startswith("postgresql://"):
-        config = PostgreSQLBlockStoreConfig()
+    if db_url.startswith("postgresql://"):
+        return PostgreSQLBlockStoreConfig()
     else:
-        config = MockedBlockStoreConfig()
-
-    raid = fixtures_customization.get("blockstore_mode", "NO_RAID").upper()
-    if raid == "RAID0":
-        config = RAID0BlockStoreConfig(blockstores=[config, MockedBlockStoreConfig()])
-    elif raid == "RAID1":
-        config = RAID1BlockStoreConfig(blockstores=[config, MockedBlockStoreConfig()])
-    elif raid == "RAID1_PARTIAL_CREATE_OK":
-        config = RAID1BlockStoreConfig(
-            blockstores=[config, MockedBlockStoreConfig()], partial_create_ok=True
-        )
-    elif raid == "RAID5":
-        config = RAID5BlockStoreConfig(
-            blockstores=[config, MockedBlockStoreConfig(), MockedBlockStoreConfig()]
-        )
-    elif raid == "RAID5_PARTIAL_CREATE_OK":
-        config = RAID5BlockStoreConfig(
-            blockstores=[config, MockedBlockStoreConfig(), MockedBlockStoreConfig()],
-            partial_create_ok=True,
-        )
-    else:
-        assert raid == "NO_RAID"
-
-    return config
+        return MockedBlockStoreConfig()


 @pytest.fixture
@@ -371,7 +256,7 @@ def reset_testbed(
 ):
     async def _reset_testbed(keep_logs=False):
         if request.config.getoption("--postgresql"):
-            await trio_asyncio.aio_as_trio(asyncio_reset_postgresql_testbed)
+            await asyncio_reset_postgresql_testbed()

         if not keep_logs:
             caplog.clear()
diff --git a/server/tests/monitor.py b/server/tests/monitor.py
deleted file mode 100644
index a562f64406b..00000000000
--- a/server/tests/monitor.py
+++ /dev/null
@@ -1,407 +0,0 @@
-# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS
-from __future__ import annotations
-
-import argparse
-import logging
-
-# Monitor POC, shamelessly taken from curio
-import os
-import signal
-import socket
-import telnetlib
-import threading
-import traceback
-
-import trio
-from trio.abc import Instrument
-from trio.lowlevel import current_statistics
-
-LOGGER = logging.getLogger("trio.monitor")
-
-MONITOR_HOST = "127.0.0.1"
-MONITOR_PORT = 48802
-
-# Telnet doesn't support unicode, so we must rely on ascii art instead :'-(
-if 0:
-    MID_PREFIX = "├─ "
-    MID_CONTINUE = "│  "
-    END_PREFIX = "└─ "
-else:
-    MID_PREFIX = "|- "
-    MID_CONTINUE = "|  "
-    END_PREFIX = "|_ "
-END_CONTINUE = " " * len(END_PREFIX)
-
-
-def is_shielded_task(task):
-    cancel_status = task._cancel_status
-    while cancel_status:
-        if cancel_status._scope.shield:
-            return True
-        cancel_status = cancel_status._parent
-    return False
-
-
-def _render_subtree(name, rendered_children):
-    lines = []
-    lines.append(name)
-    for child_lines in rendered_children:
-        if child_lines is rendered_children[-1]:
-            first_prefix = END_PREFIX
-            rest_prefix = END_CONTINUE
-        else:
-            first_prefix = MID_PREFIX
-            rest_prefix = MID_CONTINUE
-        lines.append(first_prefix + child_lines[0])
-        for child_line in child_lines[1:]:
-            lines.append(rest_prefix + child_line)
-    return lines
-
-
-def _rendered_nursery_children(nursery, format_task):
-    return [task_tree_lines(t, format_task) for t in nursery.child_tasks]
-
-
-def task_tree_lines(task, format_task):
-    rendered_children = []
-    nurseries = list(task.child_nurseries)
-    while nurseries:
-        nursery = nurseries.pop()
-        nursery_children = _rendered_nursery_children(nursery, format_task)
-        if rendered_children:
-            nested = _render_subtree("(nested nursery)", rendered_children)
-            nursery_children.append(nested)
-        rendered_children = nursery_children
-    return _render_subtree(format_task(task), rendered_children)
-
-
-def render_task_tree(task, format_task):
-    return "\n".join(line for line in task_tree_lines(task, format_task)) + "\n"
-
-
-class TaskWrapper:
-    def __init__(self, task):
-        self.task = task
-        self._monitor_state = None
-        self._monitor_short_id = None
-
-    def __getattr__(self, name):
-        return getattr(self.task, name)
-
-
-class Monitor(Instrument):
-    def __init__(self, host=MONITOR_HOST, port=MONITOR_PORT):
-        self.address = (host, port)
-        self._trio_token = None
-        self._next_task_short_id = 0
-        self._tasks = {}
-        self._closing = None
-        self._ui_thread = None
-
-    def get_task_from_short_id(self, shortid):
-        for task in self._tasks.values():
-            if task._monitor_short_id == shortid:
-                return task
-        return None
-
-    def before_run(self):
-        LOGGER.info("Starting Trio monitor at %s:%d", *self.address)
-        self._trio_token = trio.lowlevel.current_trio_token()
-        self._ui_thread = threading.Thread(target=self.server, args=(), daemon=True)
-        self._closing = threading.Event()
-        self._ui_thread.start()
-
-    def task_spawned(self, task):
-        task_wrapper = TaskWrapper(task)
-        self._tasks[id(task)] = task_wrapper
-        task_wrapper._monitor_short_id = self._next_task_short_id
-        self._next_task_short_id += 1
-        task_wrapper._monitor_state = "spawned"
-
-    def task_scheduled(self, task):
-        self._tasks[id(task)]._monitor_state = "scheduled"
-
-    def before_task_step(self, task):
-        self._tasks[id(task)]._monitor_state = "running"
-
-    def after_task_step(self, task):
-        if id(task) in self._tasks:
-            self._tasks[id(task)]._monitor_state = "waiting"
-
-    def task_exited(self, task):
-        del self._tasks[id(task)]
-
-    # def before_io_wait(self, timeout):
-    #     if timeout:
-    #         print("### waiting for I/O for up to {} seconds".format(timeout))
-    #     else:
-    #         print("### doing a quick check for I/O")
-    #     self._sleep_time = trio.current_time()
-
-    # def after_io_wait(self, timeout):
-    #     duration = trio.current_time() - self._sleep_time
-    #     print("### finished I/O check (took {} seconds)".format(duration))
-
-    def after_run(self):
-        LOGGER.info("Stopping Trio monitor ui thread")
-        self._closing.set()
-        self._ui_thread.join()
-    def server(self):
-        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
-
-        # set the timeout to prevent the server loop from
-        # blocking indefinitely on sock.accept()
-        sock.settimeout(0.5)
-        sock.bind(self.address)
-        sock.listen(1)
-        with sock:
-            while not self._closing.is_set():
-                try:
-                    client, addr = sock.accept()
-                    with client:
-                        client.settimeout(0.5)
-
-                        # This bit of magic is for reading lines of input while still allowing
-                        # timeouts and the ability for the monitor to die when curio exits.
-                        # See Issue #108.
-
-                        def readlines():
-                            buffer = bytearray()
-                            while not self._closing.is_set():
-                                index = buffer.find(b"\n")
-                                if index >= 0:
-                                    line = buffer[: index + 1].decode("latin-1")
-                                    del buffer[: index + 1]
-                                    yield line
-
-                                try:
-                                    chunk = client.recv(1000)
-                                    if not chunk:
-                                        break
-
-                                    buffer.extend(chunk)
-                                except socket.timeout:
-                                    pass
-
-                        sout = client.makefile("w", encoding="latin-1")
-                        self.interactive_loop(sout, readlines())
-                except socket.timeout:
-                    continue
-
-    def interactive_loop(self, sout, input_lines):
-        """
-        Main interactive loop of the monitor
-        """
-        sout.write("Trio Monitor: %d tasks running\n" % len(self._tasks))
-        sout.write("Type help for commands\n")
-        while True:
-            sout.write("trio > ")
-            sout.flush()
-            resp = next(input_lines, None)
-            if not resp:
-                return
-
-            try:
-                if resp.startswith("q"):
-                    self.command_exit(sout)
-                    return
-
-                elif resp.startswith("pa"):
-                    _, taskid_s = resp.split()
-                    self.command_parents(sout, int(taskid_s))
-
-                elif resp.startswith("s"):
-                    self.command_stats(sout)
-
-                elif resp.startswith("p"):
-                    self.command_ps(sout)
-
-                elif resp.startswith("t"):
-                    self.command_task_tree(sout)
-
-                elif resp.startswith("exit"):
-                    self.command_exit(sout)
-                    return
-
-                elif resp.startswith("cancel"):
-                    _, taskid_s = resp.split()
-                    self.command_cancel(sout, int(taskid_s))
-
-                elif resp.startswith("signal"):
-                    _, signame = resp.split()
-                    self.command_signal(sout, signame)
-
-                elif resp.startswith("w"):
-                    _, taskid_s = resp.split()
-                    self.command_where(sout, int(taskid_s))
-
-                elif resp.startswith("h"):
-                    self.command_help(sout)
-                else:
-                    sout.write("Unknown command. Type help.\n")
-            except Exception as e:
-                sout.write("Bad command. %s\n" % e)
-    def command_help(self, sout):
-        sout.write(
-            """Commands:
-         ps               : Show task table
-         stat             : Display general runtime information
-         tree             : Display hierarchical view of tasks and nurseries
-         where taskid     : Show stack frames for a task
-         cancel taskid    : Cancel an indicated task
-         signal signame   : Send a Unix signal
-         parents taskid   : List task parents
-         quit             : Leave the monitor
-"""
-        )
-
-    def command_stats(self, sout):
-        async def get_current_statistics():
-            return current_statistics()
-
-        stats = trio.from_thread.run(get_current_statistics, trio_token=self._trio_token)
-        sout.write(
-            """tasks_living: {s.tasks_living}
-tasks_runnable: {s.tasks_runnable}
-seconds_to_next_deadline: {s.seconds_to_next_deadline}
-run_sync_soon_queue_size: {s.run_sync_soon_queue_size}
-io_statistics:
-    tasks_waiting_read: {s.io_statistics.tasks_waiting_read}
-    tasks_waiting_write: {s.io_statistics.tasks_waiting_write}
-    backend: {s.io_statistics.backend}
-""".format(
-                s=stats
-            )
-        )
-
-    def command_ps(self, sout):
-        headers = ("Id", "State", "Shielded", "Task")
-        widths = (5, 10, 10, 50)
-        for h, w in zip(headers, widths):
-            sout.write("%-*s " % (w, h))
-        sout.write("\n")
-        sout.write(" ".join(w * "-" for w in widths))
-        sout.write("\n")
-        for task in sorted(self._tasks.values(), key=lambda t: t._monitor_short_id):
-            sout.write(
-                "%-*d %-*s %-*s %-*s\n"
-                % (
-                    widths[0],
-                    task._monitor_short_id,
-                    widths[1],
-                    task._monitor_state,
-                    widths[2],
-                    "yes" if is_shielded_task(task) else "",
-                    widths[3],
-                    task.name,
-                )
-            )
-
-    def command_task_tree(self, sout):
-        root_task = next(iter(self._tasks.values())).task
-        while root_task.parent_nursery is not None:
-            root_task = root_task.parent_nursery.parent_task
-
-        def _format_task(task):
-            task = self._tasks[id(task)]
-            return "{} (id={}, {}{})".format(
-                task.name,
-                task._monitor_short_id,
-                task._monitor_state,
-                ", shielded" if is_shielded_task(task) else "",
-            )
-
-        task_tree = render_task_tree(root_task, _format_task)
-        sout.write(task_tree)
-
-    def command_where(self, sout, taskid):
-        task = self.get_task_from_short_id(taskid)
-        if task:
-
-            def walk_coro_stack(coro):
-                while coro is not None:
-                    if hasattr(coro, "cr_frame"):
-                        # A real coroutine
-                        yield coro.cr_frame, coro.cr_frame.f_lineno
-
-                        coro = coro.cr_await
-                    elif hasattr(coro, "gi_frame"):
-                        # A generator decorated with @types.coroutine
-                        yield coro.gi_frame, coro.gi_frame.f_lineno
-
-                        coro = coro.gi_yieldfrom
-                    else:
-                        # A coroutine wrapper (used by AsyncGenerator for
-                        # instance), cannot go further
-                        return
-
-            ss = traceback.StackSummary.extract(walk_coro_stack(task.coro))
-            tb = "".join(ss.format())
-            sout.write(tb + "\n")
-        else:
-            sout.write("No task %d\n" % taskid)
-
-    def command_signal(self, sout, signame):
-        if hasattr(signal, signame):
-            os.kill(os.getpid(), getattr(signal, signame))
-        else:
-            sout.write("Unknown signal %s\n" % signame)
-
-    def command_cancel(self, sout, taskid):
-        # TODO: how to cancel a single task ?
-        # Another solution could be to also display nurseries/cancel_scopes in
-        # the monitor and allow cancelling them. Given timeouts are handled
-        # by cancel_scope, this could also allow us to monitor the remaining
-        # time (and the tasks depending on it) in such objects.
- sout.write("Not supported yet...") - - def command_parents(self, sout, taskid): - task = self.get_task_from_short_id(taskid) - while task: - sout.write("%-6d %12s %s\n" % (task._monitor_short_id, "running", task.name)) - task = ( - self._tasks[id(task.parent_nursery._parent_task)] if task.parent_nursery else None - ) - - def command_exit(self, sout): - sout.write("Leaving monitor. Hit Ctrl-C to exit\n") - sout.flush() - - -def monitor_client(host, port): - """ - Client to connect to the monitor via "telnet" - """ - tn = telnetlib.Telnet() - tn.open(host, port, timeout=0.5) - try: - tn.interact() - except KeyboardInterrupt: - pass - finally: - tn.close() - - -def main(): - parser = argparse.ArgumentParser("usage: python -m trio.monitor [options]") - parser.add_argument( - "-H", "--host", dest="monitor_host", default=MONITOR_HOST, type=str, help="monitor host ip" - ) - - parser.add_argument( - "-p", - "--port", - dest="monitor_port", - default=MONITOR_PORT, - type=int, - help="monitor port number", - ) - args = parser.parse_args() - monitor_client(args.monitor_host, args.monitor_port) - - -if __name__ == "__main__": - main() diff --git a/server/tests/run_parallel.py b/server/tests/run_parallel.py deleted file mode 100644 index 2bd2bd60c50..00000000000 --- a/server/tests/run_parallel.py +++ /dev/null @@ -1,291 +0,0 @@ -#! /usr/bin/env python3 -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -""" -Why this an not just pytest_xdist ? - -The issue is pytest_xdist is based on the `execnet` library for dispatching -tests. This is what allow pytest_xdist to run tests across different machines, -but is a bit overkill if you only want to run tests locally. - -However the main issue is that `execnet` doesn't run the actual pytest code -in the main thread of the newly created process which is an issue on macOS. - -On top of that I suspect the forking mechanism of pytest_xdist to keep sharing -too much things between processes (given the fork occurs after pytest has been -started). This is probably not the case, but I'm currently a very desperate -man considering how unstable the CI is right now... - -So the idea is simple here: -- Each child starts pytest with the `--slice-test` option, so parent doesn't even - have to do the test dispatching. The drawback is we cannot do work stealing if - a job lags behind because it got most of the slow tests. -- We share as few thing as possible between parent and child process (i.e. a single - queue to inform what is going in the child). 
-""" - -import multiprocessing -import queue -import sys -from collections import defaultdict -from typing import List - -import pytest - -COLOR_END = "\033[0m" -COLOR_RED = "\033[91m" -COLOR_GREEN = "\033[92m" -COLOR_YELLOW = "\033[93m" - -EVT_WORKER_READY = "worker_ready" -EVT_WORKER_FINISHED = "worker_finished" -EVT_INTERNAL_ERROR = "internal_error" -EVT_COLLECTION_FINISH = "collection_finish" -EVT_TEST_REPORT = "test_report" - - -class TestStatusReportPlugin: - def __init__(self, job_id: int, report_queue: multiprocessing.Queue): - self.job_id = job_id - self.report_queue = report_queue - - def sendevent(self, name, **kwargs): - self.report_queue.put((self.job_id, name, kwargs)) - - @pytest.hookimpl - def pytest_internalerror(self, excrepr): - formatted_error = str(excrepr) - self.sendevent(EVT_INTERNAL_ERROR, formatted_error=formatted_error) - - @pytest.hookimpl - def pytest_collection_finish(self, session: pytest.Session): - self.sendevent(EVT_COLLECTION_FINISH, total_tests_count=len(session.items)) - - @pytest.hookimpl - def pytest_sessionstart(self, session): - self.sendevent(EVT_WORKER_READY) - - @pytest.hookimpl - def pytest_sessionfinish(self, exitstatus): - self.sendevent(EVT_WORKER_FINISHED, exitstatus=exitstatus) - - @pytest.hookimpl - def pytest_runtest_logreport(self, report: pytest.TestReport): - # Ignore tests skipped for being out of the job's tests slice - if ("test_out_of_slice", True) in report.user_properties: - return - self.sendevent( - EVT_TEST_REPORT, - nodeid=report.nodeid, - outcome=report.outcome, - when=report.when, - longrepr=report.longrepr, - duration=report.duration, - ) - - @pytest.hookimpl - def pytest_warning_recorded(self, warning_message, when, nodeid, location): - self.sendevent( - "warning_recorded", - warning_message_data=str(warning_message), - when=when, - nodeid=nodeid, - location=location, - ) - - -def _run_pytest(job_index, args, plugins): - import os - import sys - - # Stdout is shared with parent process, so we must disable it to keep it readable - sys.stdout = open(os.devnull, "w") - try: - pytest.main(args, plugins) - - except BaseException as exc: - import traceback - - tb_formatted = traceback.format_exception(exc, exc, exc.__traceback__) - msg = "unexpected exception from pytest:\n" + "\n".join(tb_formatted) - # Print on stderr so we're sure the stacktrace will appear in the logs - # even if parent process fail to process the event when send - print( - f">>> [gw{job_index}] {COLOR_RED}CRASH !!!{COLOR_END} {msg}", - file=sys.stderr, - flush=True, - ) - plugins[0].report_queue.put((job_index, "unexpected_exception", {"msg": msg})) - - if not isinstance(exc, Exception): - raise exc - - -if __name__ == "__main__": - verbose = any(True for x in sys.argv if x == "--verbose" or x.startswith("-v")) - fast_fail = "-x" in sys.argv - try: - index = sys.argv.index("-n") - parallelism = sys.argv[index + 1] - if parallelism == "auto": - parallelism = multiprocessing.cpu_count() - parallelism = int(parallelism) - except (IndexError, ValueError): - raise SystemExit(f"usage: {sys.argv[0]} -n auto tests") - - args = sys.argv[1:index] + sys.argv[index + 2 :] - - multiprocessing.set_start_method("spawn") - print("==== Running in parallel ===") - report_queue = multiprocessing.Queue() - jobs: List[multiprocessing.Process] = [] - for job_index in range(parallelism): - job_args = [f"--slice-tests={job_index + 1}/{parallelism}", *args] - print(f"pytest {' '.join(job_args)}") - plugins = [TestStatusReportPlugin(job_index, report_queue)] - job = 
-        job = multiprocessing.Process(target=_run_pytest, args=[job_index, job_args, plugins])
-        jobs.append(job)
-
-    jobs_status: List[str] = []
-    for job in jobs:
-        job.start()
-        jobs_status.append("started")
-
-    total_tests_count = None
-    tests_started = {}
-    tests_has_failed = False
-
-    def _set_test_has_failed():
-        global tests_has_failed
-        tests_has_failed = True
-        if fast_fail:
-            raise KeyboardInterrupt()
-
-    # Use a default dict here, so setting the error never overwrites a previously
-    # set error (this is supposed not to happen, but if it does we really want to know !)
-    job_crashes = defaultdict(lambda: "")
-
-    def _percent_display(event_params):
-        percent_color = COLOR_RED if tests_has_failed else COLOR_GREEN
-        percent = len(tests_started) * 100 / total_tests_count
-        return f"{percent_color}[{int(percent)}%]{COLOR_END}"
-
-    try:
-        while True:
-            try:
-                job_index, event_name, event_params = report_queue.get(timeout=1)
-            except queue.Empty:
-                if all(not job.is_alive() for job in jobs):
-                    break
-
-            else:
-                if event_name == "unexpected_exception":
-                    # The event has already been printed by the child process,
-                    # so only store it for the final recap
-                    job_crashes[
-                        job_index
-                    ] += (
-                        f">>> [gw{job_index}] {COLOR_RED}CRASH !!!{COLOR_END} {event_params['msg']}"
-                    )
-                    _set_test_has_failed()
-
-                elif event_name == EVT_INTERNAL_ERROR:
-                    msg = f">>> [gw{job_index}] {COLOR_RED}CRASH !!!{COLOR_END} Pytest internal error:\n{event_params['formatted_error']}"
-                    print(msg, flush=True)
-                    job_crashes[job_index] += msg
-                    _set_test_has_failed()
-
-                elif event_name == EVT_COLLECTION_FINISH:
-                    total_tests_count = event_params["total_tests_count"]
-
-                elif event_name == EVT_WORKER_READY:
-                    jobs_status[job_index] = "ready"
-
-                elif event_name == EVT_WORKER_FINISHED:
-                    jobs_status[job_index] = "finished"
-                    print(
-                        f">>> [gw{job_index}] pytest job has finished with status {COLOR_RED if event_params['exitstatus'] else COLOR_GREEN}{event_params['exitstatus']}{COLOR_END}"
-                    )
-                    if event_params["exitstatus"] != 0:
-                        _set_test_has_failed()
-                    if all(x == "finished" for x in jobs_status):
-                        break
-
-                elif event_name == EVT_TEST_REPORT:
-                    percent_color = COLOR_RED if tests_has_failed else COLOR_GREEN
-                    tests_started.setdefault(event_params["nodeid"], event_params["outcome"])
-                    base = (
-                        f"[gw{job_index}] {_percent_display(event_params)} {event_params['nodeid']}"
-                    )
-                    if event_params["when"] == "setup":
-                        if event_params["outcome"] == "skipped":
-                            outcome = f"{COLOR_YELLOW}SKIPPED{COLOR_END}"
-                            print(f"{base} {outcome}")
-                        elif event_params["outcome"] == "passed":
-                            outcome = "..."
- print(f"{base} {outcome}") - else: - outcome = f"{COLOR_RED}Error !!!{COLOR_END}\n{event_params['longrepr']}" - print(f"{base} {outcome}") - _set_test_has_failed() - - elif event_params["when"] == "call": - if tests_started.get(event_params["nodeid"]) == "skipped": - continue - if event_params["outcome"] not in ("skipped", "passed"): - outcome = f"{COLOR_RED}Error !!!{COLOR_END}\n{event_params['longrepr']}" - tests_started[event_params["nodeid"]] = ("error", outcome) - print(f"{base} {outcome}") - _set_test_has_failed() - - elif event_params["when"] == "teardown": - if tests_started.get(event_params["nodeid"]) == "skipped": - continue - # Teardown is never in `skipped` state - if event_params["outcome"] == "passed": - if event_params["duration"] > 1: - outcome = ( - f"{COLOR_GREEN}PASSED{COLOR_END} ({event_params['duration']}s)" - ) - else: - outcome = f"{COLOR_GREEN}PASSED{COLOR_END}" - print(f"{base} {outcome}") - else: - _set_test_has_failed() - outcome = f"{COLOR_RED}Error !!!{COLOR_END}\n{event_params['longrepr']}" - print(f"{base} {outcome}") - - elif event_name == "warning_recorded": - base = f">>> [gw{job_index}] {_percent_display(event_params)} {event_params['nodeid']}" - print( - f"{base} {COLOR_YELLOW}Warning during {event_params['when']} step{COLOR_END}: {event_params['warning_message_data']}" - ) - - except KeyboardInterrupt: - print("^C hit, terminating jobs...") - for job in jobs: - job.terminate() - - else: - if any(job_status != "finished" for job_status in jobs_status): - tests_has_failed = True # Just to be sure - print( - ">>> Some jobs died before pytest has finished :'(\n" - + " \n".join( - f"[gw{job_index}] last status {COLOR_GREEN if job_status == 'finished' else COLOR_RED}{job_status}{COLOR_END}" - for job_index, job_status in enumerate(jobs_status) - ), - flush=True, - ) - - for job_index, job in enumerate(jobs): - crash_msg = job_crashes.get(job_index) - if crash_msg: - print(f">>> [gw{job_index}] {COLOR_RED}CRASH !!!{COLOR_END} {crash_msg}", flush=True) - try: - job.join(timeout=5) - except TimeoutError: - print(f"Job gw{job_index} takes too long to join...") - - raise SystemExit(1 if tests_has_failed else 0) diff --git a/server/tests/scripts/bench_mountpoint.py b/server/tests/scripts/bench_mountpoint.py deleted file mode 100755 index 9228a429f20..00000000000 --- a/server/tests/scripts/bench_mountpoint.py +++ /dev/null @@ -1,108 +0,0 @@ -#! /usr/bin/env python3 -# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS -from __future__ import annotations - -import signal -from contextlib import contextmanager -from pathlib import Path -from subprocess import PIPE, Popen, run -from tempfile import mkdtemp -from time import sleep - -PORT = 6778 -ORGNAME = "Org42" -TOKEN = "CCDCC27B6108438D99EF8AF5E847C3BB" -DEVICE = "alice@dev1" -PASSWORD = "P@ssw0rd." 
-
-PARSEC_CLI = "python -m parsec.cli"
-PARSEC_PROFILE_CLI = "python -m cProfile -o bench.prof -m parsec.cli"
-
-
-def run_cmd(cmd):
-    print(f"---> {cmd}")
-    out = run(cmd.split(), capture_output=True)
-    if out.returncode != 0:
-        print(out.stdout.decode())
-        print(out.stderr.decode())
-        raise RuntimeError(f"Error during command `{cmd}`")
-    return out
-
-
-@contextmanager
-def keep_running_cmd(cmd):
-    print(f"===> {cmd}")
-    process = Popen(cmd.split(), stdout=PIPE, stderr=PIPE)
-    sleep(0.2)
-    if process.poll():
-        print(process.stdout.read().decode())
-        print(process.stderr.read().decode())
-        raise RuntimeError(f"Command `{cmd}` has stopped with status code {process.returncode}")
-    try:
-        yield
-    finally:
-        if not process.poll():
-            process.send_signal(signal.SIGINT)
-            process.wait()
-            if process.returncode != 0:
-                print(process.stdout.read().decode())
-                print(process.stderr.read().decode())
-                raise RuntimeError(f"Command `{cmd}` returned status code {process.returncode}")
-
-
-def main():
-    workdir = Path(mkdtemp(prefix="parsec-bench-"))
-    print(f"Workdir: {workdir}")
-    confdir = workdir / "core"
-    mountdir = workdir / "mountpoint"
-    confdir.mkdir(exist_ok=True)
-    mountdir.mkdir(exist_ok=True)
-
-    # Start backend & create organization
-    with keep_running_cmd(f"{PARSEC_CLI} backend run --port={PORT}"):
-        backend_addr = f"parsec://127.0.0.1:{PORT}?no_ssl=true"
-
-        out = run_cmd(
-            f"{PARSEC_CLI} core create_organization {ORGNAME}"
-            f" --addr={backend_addr} --administration-token={TOKEN}"
-        )
-
-        bootstrap_addr = out.stdout.decode().split("Bootstrap organization url: ")[-1].strip()
-        out = run_cmd(
-            f"{PARSEC_CLI} core bootstrap_organization {DEVICE}"
-            f" --addr={bootstrap_addr} --config-dir={confdir} --password={PASSWORD}"
-        )
-
-        out = run_cmd(
-            f"{PARSEC_CLI} core create_workspace w1"
-            f" --config-dir={confdir} --device={DEVICE} --password={PASSWORD}"
-        )
-
-        with keep_running_cmd(
-            f"{PARSEC_PROFILE_CLI} core run -l INFO"
-            f" --device={DEVICE} --password={PASSWORD} --mountpoint={mountdir} --config-dir={confdir}"
-        ):
-            # Wait for the mountpoint to be ready
-            w1dir = mountdir / "w1"
-            for _ in range(10):
-                sleep(0.1)
-                if w1dir.exists():
-                    break
-            else:
-                raise RuntimeError("Parsec failed to mount the workspace")
-
-            # Create a 100 MB file
-            file = workdir / "sample"
-            file.write_bytes(bytearray(100 * 1024 * 1024))
-
-            try:
-                # Copy it into the workspace
-                print("********** starting bench ***********")
-                run(f"time pv {file} > {mountdir}/w1/sample", shell=True)
-                print("********** bench done ***********")
-            finally:
-                file.unlink()
-
-
-if __name__ == "__main__":
-    main()
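Since `PARSEC_PROFILE_CLI` runs the client under cProfile with `-o bench.prof`, the resulting profile can then be inspected with the standard library:

    import pstats

    stats = pstats.Stats("bench.prof")
    stats.sort_stats("cumulative").print_stats(20)  # top 20 hot spots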
diff --git a/server/tests/scripts/run_testbed_server.py b/server/tests/scripts/run_testbed_server.py
deleted file mode 100644
index 2af6a54577a..00000000000
--- a/server/tests/scripts/run_testbed_server.py
+++ /dev/null
@@ -1,195 +0,0 @@
-# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS
-
-from __future__ import annotations
-
-import argparse
-import sys
-import tempfile
-from functools import partial
-
-import psutil
-import trio
-from quart import make_response
-
-try:
-    from parsec._parsec import testbed
-except ImportError as exc:
-    raise RuntimeError("Test features are disabled !") from exc
-from parsec.api.protocol import OrganizationID
-from parsec.backend import backend_app_factory
-from parsec.backend.asgi import app_factory as asgi_app_factory
-from parsec.backend.asgi import serve_backend_with_asgi
-from parsec.backend.config import BackendConfig, MockedBlockStoreConfig, MockedEmailConfig
-from parsec.logging import configure_logging
-
-DEFAULT_ORGANIZATION_LIFE_LIMIT = 10 * 60  # 10 minutes
-
-
-async def _run_server(
-    host: str,
-    port: int,
-    backend_addr: str,
-    orga_life_limit: float,
-    stop_after_process: int | None,
-):
-    # TODO: avoid tempdir for email ?
-    tmpdir = tempfile.mkdtemp(prefix="tmp-email-folder-")
-    config = BackendConfig(
-        debug=True,
-        db_url="MOCKED",
-        db_min_connections=1,
-        db_max_connections=1,
-        sse_keepalive=30,
-        forward_proto_enforce_https=None,
-        backend_addr=None,
-        email_config=MockedEmailConfig("no-reply@parsec.com", tmpdir),
-        blockstore_config=MockedBlockStoreConfig(),
-        administration_token="s3cr3t",
-        organization_spontaneous_bootstrap=True,
-    )
-    async with backend_app_factory(config=config) as backend:
-        # The nursery must be enclosed by the backend (and not the other way around !) given
-        # we will sleep forever in its __aexit__ part
-        async with trio.open_nursery() as nursery:
-            if stop_after_process:
-
-                async def _watch_and_stop_after_process(pid: int, cancel_scope: trio.CancelScope):
-                    while True:
-                        await trio.sleep(1)
-                        if not psutil.pid_exists(pid):
-                            print(f"PID `{pid}` has left, closing server.")
-                            cancel_scope.cancel()
-                            break
-
-                nursery.start_soon(
-                    _watch_and_stop_after_process, stop_after_process, nursery.cancel_scope
-                )
-
-            org_count = 0
-            template_id_to_org_id_and_crc: dict[str, tuple[OrganizationID, int]] = {}
-
-            # All set ! Now we can start the server
-
-            asgi = asgi_app_factory(backend)
-
-            # The testbed server often runs in the background, so its output on crash is often
-            # not visible (e.g. on the CI). Hence it's convenient to have the client
-            # print the stacktrace on our behalf.
-            # Note the testbed server is only meant to be run for tests and on a local
-            # machine, so this has no security implication.
-            @asgi.errorhandler(500)
-            def _on_500(e):
-                import traceback
-
-                msg = traceback.format_exception(
-                    type(e.original_exception),
-                    e.original_exception,
-                    e.original_exception.__traceback__,
-                )
-                return "".join(msg), 500
-
-            # Add CORS handling
-            @asgi.after_request
-            def _add_cors(response):
-                response.headers["ACCESS-CONTROL-ALLOW-ORIGIN"] = "*"
-                response.headers["ACCESS-CONTROL-ALLOW-METHODS"] = "*"
-                response.headers["ACCESS-CONTROL-ALLOW-HEADERS"] = "*"
-                return response
-
-            # We don't use json in the /testbed/... routes, this is to simplify
-            # the client-side implementation as much as possible
-
-            load_template_lock = trio.Lock()
-
-            @asgi.route("/testbed/new/