diff --git a/.github/workflows/cd-syft-dev.yml b/.github/workflows/cd-syft-dev.yml index 508610699b1..d8941739d69 100644 --- a/.github/workflows/cd-syft-dev.yml +++ b/.github/workflows/cd-syft-dev.yml @@ -121,6 +121,17 @@ jobs: ${{ secrets.ACR_SERVER }}/openmined/grid-seaweedfs:dev-${{ github.sha }} ${{ secrets.ACR_SERVER }}/openmined/grid-seaweedfs:${{ steps.grid.outputs.GRID_VERSION }} + - name: Build and push `grid-veilid` image to registry + uses: docker/build-push-action@v5 + with: + context: ./packages/grid/veilid + file: ./packages/grid/veilid/veilid.dockerfile + push: true + tags: | + ${{ secrets.ACR_SERVER }}/openmined/grid-veilid:dev + ${{ secrets.ACR_SERVER }}/openmined/grid-veilid:dev-${{ github.sha }} + ${{ secrets.ACR_SERVER }}/openmined/grid-veilid:${{ steps.grid.outputs.GRID_VERSION }} + - name: Build Helm Chart & Copy to infra if: github.ref == 'refs/heads/dev' || github.event.inputs.deploy-helm == 'true' shell: bash diff --git a/.github/workflows/cd-syft.yml b/.github/workflows/cd-syft.yml index e13c0b488f9..b327a7316fa 100644 --- a/.github/workflows/cd-syft.yml +++ b/.github/workflows/cd-syft.yml @@ -246,7 +246,24 @@ jobs: digest="${{ steps.grid-seaweedfs-build.outputs.digest }}" touch "/tmp/digests/grid-seaweedfs/${digest#sha256:}" - - name: Upload digest for grid-backend, grid-frontend and grid-seaweedfs + - name: Build and push `grid-veilid` image to DockerHub + id: grid-veilid-build + uses: docker/build-push-action@v5 + with: + context: ./packages/grid/veilid + file: ./packages/grid/veilid/veilid.dockerfile + platforms: ${{ steps.release_metadata.outputs.release_platform }} + outputs: type=image,name=openmined/grid-veilid,push-by-digest=true,name-canonical=true,push=true + cache-from: type=registry,ref=openmined/grid-veilid:cache-${{ steps.release_metadata.outputs.short_release_platform }} + cache-to: type=registry,ref=openmined/grid-veilid:cache-${{ steps.release_metadata.outputs.short_release_platform}},mode=max + + - name: Export digest 
for grid-veilid + run: | + mkdir -p /tmp/digests/grid-veilid + digest="${{ steps.grid-veilid-build.outputs.digest }}" + touch "/tmp/digests/grid-veilid/${digest#sha256:}" + + - name: Upload digest for grid-backend, grid-frontend, grid-seaweedfs and grid-veilid uses: actions/upload-artifact@v4 with: name: digests-${{ steps.release_metadata.outputs.grid_version }}-${{ steps.release_metadata.outputs.short_release_platform }} @@ -305,6 +322,14 @@ jobs: -t openmined/grid-seaweedfs:${{ needs.build-and-push-docker-images.outputs.release_tag }} \ $(printf 'openmined/grid-seaweedfs@sha256:%s ' *) + - name: Create manifest list and push for grid-veilid + working-directory: /tmp/digests/grid-veilid + run: | + docker buildx imagetools create \ + -t openmined/grid-veilid:${{ needs.build-and-push-docker-images.outputs.grid_version }} \ + -t openmined/grid-veilid:${{ needs.build-and-push-docker-images.outputs.release_tag }} \ + $(printf 'openmined/grid-veilid@sha256:%s ' *) + deploy-syft: needs: [merge-docker-images] if: always() && needs.merge-docker-images.result == 'success' diff --git a/.github/workflows/pr-tests-stack.yml b/.github/workflows/pr-tests-stack.yml index 0327248dd0b..7e6f1de22c3 100644 --- a/.github/workflows/pr-tests-stack.yml +++ b/.github/workflows/pr-tests-stack.yml @@ -547,6 +547,13 @@ jobs: run: | sudo python ./scripts/patch_hosts.py --add-k3d-registry + - name: Free Disk Space (Ubuntu) + if: matrix.os == 'ubuntu-latest' + uses: jlumbroso/free-disk-space@main + with: + tool-cache: true + large-packages: false + # free 10GB of space - name: Remove unnecessary files if: matrix.os == 'ubuntu-latest' diff --git a/notebooks/Testing/Veilid/Alice-Python-Server.ipynb b/notebooks/Testing/Veilid/Alice-Python-Server.ipynb new file mode 100644 index 00000000000..b398119c7f0 --- /dev/null +++ b/notebooks/Testing/Veilid/Alice-Python-Server.ipynb @@ -0,0 +1,232 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "c74990eb-d769-4117-8c88-e9210136606e", + "metadata": {}, + "source": [ + "## Alice 
Python Server" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "20df98d8-de6c-496c-b30e-6421ac99401c", + "metadata": {}, + "outputs": [], + "source": [ + "# third party\n", + "import requests" + ] + }, + { + "cell_type": "markdown", + "id": "54885cd0-f803-4911-8423-e595dc4cd7c3", + "metadata": {}, + "source": [ + "### 1. Create DHT Key and Private Route" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "41d82ff3-ceda-4569-8178-8758ef635cb0", + "metadata": {}, + "outputs": [], + "source": [ + "host = \"localhost\"\n", + "port = 4000" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0d9f3cca-66a7-4e6c-a332-b38a8f5c02db", + "metadata": {}, + "outputs": [], + "source": [ + "res = requests.post(f\"http://{host}:{port}/generate_dht_key\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "81c6aa9d-26b4-4672-a059-643edfeeed95", + "metadata": {}, + "outputs": [], + "source": [ + "res.content" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4a9487e3-f5c8-468e-acd0-261e21bc3e14", + "metadata": {}, + "outputs": [], + "source": [ + "res = requests.get(f\"http://{host}:{port}/retrieve_dht_key\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5b87e9e6-244f-47f7-a31a-fa7cbce65b88", + "metadata": {}, + "outputs": [], + "source": [ + "self_dht_key = res.json()[\"message\"]\n", + "print(\"=\" * 30)\n", + "print(self_dht_key)\n", + "print(\"=\" * 30)" + ] + }, + { + "cell_type": "markdown", + "id": "a8c70d99-6814-453d-80bf-d141c40ba24e", + "metadata": {}, + "source": [ + "### Send AppMessage using DHT Key to Self" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "aca01ec6-1bbe-44b5-ad4a-053ba1edcfe6", + "metadata": {}, + "outputs": [], + "source": [ + "json_data = {\"dht_key\": self_dht_key, \"message\": \"Hello to me again\"}\n", + "app_message = requests.post(f\"http://{host}:{port}/app_message\", json=json_data)" + ] + }, 
+ { + "cell_type": "code", + "execution_count": null, + "id": "ff09ab92-3423-483a-abf3-51e8c2448cf9", + "metadata": {}, + "outputs": [], + "source": [ + "app_message.content" + ] + }, + { + "cell_type": "markdown", + "id": "4d0d9e39-bf05-4ef3-b00a-2bb605f041ee", + "metadata": {}, + "source": [ + "### Send AppCall using DHT Key to Self" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b8bc9f54-b2f0-4f88-8897-f640866ba2ed", + "metadata": {}, + "outputs": [], + "source": [ + "json_data = {\"dht_key\": self_dht_key, \"message\": \"Hello to app call\"}\n", + "app_call = requests.post(f\"http://{host}:{port}/app_call\", json=json_data)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2c1c4148-461a-459e-846a-fad332a7ce3a", + "metadata": {}, + "outputs": [], + "source": [ + "app_call.json()" + ] + }, + { + "cell_type": "markdown", + "id": "fd824cca-2a7f-4ea9-9e67-1c06d1f8bec2", + "metadata": {}, + "source": [ + "### Send AppMessage using DHT Key to Peer" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "25cfb508-dd08-44b9-85c9-e6aa07e96a97", + "metadata": {}, + "outputs": [], + "source": [ + "peer_dht_key = input(\"Enter Peer DHT Key\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2e2c1341-d840-4429-b3e5-093d8e90365e", + "metadata": {}, + "outputs": [], + "source": [ + "json_data = {\"dht_key\": peer_dht_key, \"message\": \"How are you doing , Bob\"}\n", + "app_message = requests.post(f\"http://{host}:{port}/app_message\", json=json_data)" + ] + }, + { + "cell_type": "markdown", + "id": "153377f6-698e-4013-9be3-0833b71ee0c4", + "metadata": {}, + "source": [ + "### Send Proxy Message " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "271d7316-eaab-438c-9192-55a4e44b9dea", + "metadata": {}, + "outputs": [], + "source": [ + "res = requests.get(\n", + " f\"http://{host}:{port}/proxy\",\n", + " json={\"url\": \"https://www.google.com\", \"method\": \"GET\", 
\"dht_key\": self_dht_key},\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "77e1ad1d-379a-4899-8805-c703ad437c0d", + "metadata": {}, + "outputs": [], + "source": [ + "res.content" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "73c1f0b0-d240-4964-a88b-365ea89b1bdd", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.5" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/Testing/Veilid/Bob-Python-Server.ipynb b/notebooks/Testing/Veilid/Bob-Python-Server.ipynb new file mode 100644 index 00000000000..c0b92df4115 --- /dev/null +++ b/notebooks/Testing/Veilid/Bob-Python-Server.ipynb @@ -0,0 +1,191 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "a003292f-d8f6-4888-b47d-9e0e9b1309ec", + "metadata": {}, + "source": [ + "## Bob Python Server" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "338b22f9-938e-4628-9636-14c192e42e49", + "metadata": {}, + "outputs": [], + "source": [ + "# third party\n", + "import requests" + ] + }, + { + "cell_type": "markdown", + "id": "f1279a42-f391-4ec8-b711-e9a05d601ce2", + "metadata": {}, + "source": [ + "### 1. 
Create DHT Key and Private Route" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "755d48fe-9471-4474-b47f-d344d31604aa", + "metadata": {}, + "outputs": [], + "source": [ + "host = \"localhost\"\n", + "port = 4001" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f14915f1-2535-424b-bdd9-23efab16bb43", + "metadata": {}, + "outputs": [], + "source": [ + "res = requests.post(f\"http://{host}:{port}/generate_dht_key\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "29aa597d-660e-4524-82ac-62c119e10fdf", + "metadata": {}, + "outputs": [], + "source": [ + "res.content" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "632ccceb-f742-4c8a-b00f-c55e6333fdc1", + "metadata": {}, + "outputs": [], + "source": [ + "res = requests.get(f\"http://{host}:{port}/retrieve_dht_key\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a7b8581a-a73d-4d15-97ec-2869aff00e90", + "metadata": {}, + "outputs": [], + "source": [ + "self_dht_key = res.json()[\"message\"]\n", + "print(\"=\" * 30)\n", + "print(self_dht_key)\n", + "print(\"=\" * 30)" + ] + }, + { + "cell_type": "markdown", + "id": "616f208c-fead-40cc-9391-416b59d7dc15", + "metadata": {}, + "source": [ + "### Send AppMessage using DHT Key to Self" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "538913ae-29be-41a5-9608-4c694ccb392b", + "metadata": {}, + "outputs": [], + "source": [ + "json_data = {\"dht_key\": self_dht_key, \"message\": \"Hello to me\"}\n", + "app_message = requests.post(f\"http://{host}:{port}/app_message\", json=json_data)" + ] + }, + { + "cell_type": "markdown", + "id": "3ed2c114-eab7-4be7-bd89-d5ec3a7ec4c2", + "metadata": {}, + "source": [ + "### Send AppCall using DHT Key to Self" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "db49c78d-9767-4358-aa00-e740ce04e000", + "metadata": {}, + "outputs": [], + "source": [ + "json_data = {\"dht_key\": 
self_dht_key, \"message\": \"Hello to app call\"}\n", + "app_call = requests.post(f\"http://{host}:{port}/app_call\", json=json_data)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9bc0a69e-7cff-42fc-8859-e5de6edacdeb", + "metadata": {}, + "outputs": [], + "source": [ + "app_call.json()" + ] + }, + { + "cell_type": "markdown", + "id": "73eee970-bb61-4014-9380-1944587b929a", + "metadata": {}, + "source": [ + "### Send AppMessage using DHT Key to Peer" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9e5671f6-1ffd-410c-b72a-6fb39f68fe93", + "metadata": {}, + "outputs": [], + "source": [ + "peer_dht_key = input(\"Enter Peer DHT Key\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8a753450-19e3-4603-ae93-a48bfbc7f829", + "metadata": {}, + "outputs": [], + "source": [ + "json_data = {\"dht_key\": peer_dht_key, \"message\": \"Hello Alice\"}\n", + "app_message = requests.post(f\"http://{host}:{port}/app_message\", json=json_data)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0cf79332-1a88-4d02-87b7-53c19d4fd1ad", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.5" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/Testing/Veilid/Veilid Route-Connection-Testing.ipynb b/notebooks/Testing/Veilid/Veilid Route-Connection-Testing.ipynb new file mode 100644 index 00000000000..bd2ea78c1c6 --- /dev/null +++ b/notebooks/Testing/Veilid/Veilid Route-Connection-Testing.ipynb @@ -0,0 +1,116 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": 
"a8d2d5a4-5512-4a24-aafd-7133d64c22fc", + "metadata": {}, + "outputs": [], + "source": [ + "# syft absolute" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5a5a1b05-336d-4523-ae85-4022783acf85", + "metadata": {}, + "outputs": [], + "source": [ + "# syft absolute\n", + "from syft.client.client import VeilidConnection" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "963f96e5-8d62-44b2-a975-faa23624bbd4", + "metadata": {}, + "outputs": [], + "source": [ + "veilid_conn = VeilidConnection(dht_key=\"test\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f2d6083b-527f-46be-a582-15f4404950b5", + "metadata": {}, + "outputs": [], + "source": [ + "# syft absolute\n", + "from syft.service.network.routes import connection_to_route\n", + "from syft.service.network.routes import route_to_connection" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9e8e508f-c527-43f4-98d1-7e7c6ef0dfb3", + "metadata": {}, + "outputs": [], + "source": [ + "veilid_route = connection_to_route(veilid_conn)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7aba2e02-46c7-46a2-ab11-9253e05fd2fe", + "metadata": {}, + "outputs": [], + "source": [ + "veilid_route.dht_key" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0d50eec2-a7ed-49f6-b90c-082cd8c40e0a", + "metadata": {}, + "outputs": [], + "source": [ + "re_veilid_conn = route_to_connection(veilid_route)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ea5d2d73-1cbc-496a-a6b6-4136e9423394", + "metadata": {}, + "outputs": [], + "source": [ + "re_veilid_conn" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a535caf0-d1e6-40b9-842b-066ce2b6b897", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": 
{ + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.5" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/Testing/Veilid/Veilid-Connection-Test.ipynb b/notebooks/Testing/Veilid/Veilid-Connection-Test.ipynb new file mode 100644 index 00000000000..c38143c7c35 --- /dev/null +++ b/notebooks/Testing/Veilid/Veilid-Connection-Test.ipynb @@ -0,0 +1,554 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "df3d4dbb-e179-4995-9507-1f82cb417fc5", + "metadata": {}, + "outputs": [], + "source": [ + "# syft absolute\n", + "import syft as sy\n", + "from syft.client.client import connect" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cc7f02fb-b4f8-4615-a39f-dca2752b58b2", + "metadata": {}, + "outputs": [], + "source": [ + "domain_client = sy.login(email=\"info@openmined.org\", password=\"changethis\", port=8080)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4d9ce704-36e6-455b-a633-fe943848420c", + "metadata": {}, + "outputs": [], + "source": [ + "domain_client.api.services.veilid.generate_dht_key()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ef053ef6-e31a-4634-8d5e-2e8ff2e002de", + "metadata": {}, + "outputs": [], + "source": [ + "domain_client.api.services.veilid.retrieve_dht_key()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "670f2e09-3409-4545-be3a-17e1b2a97cd2", + "metadata": {}, + "outputs": [], + "source": [ + "domain_client = sy.login_as_guest(\n", + " dht_key=\"VLD0:OBeFkuuQz6LIofeIIzC5Y-zwR96NoKqbojqGCcNKu8c\",\n", + " vld_forward_proxy=\"http://localhost:4000\",\n", + " vld_reverse_proxy=\"http://proxy\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "63a9a5f3-a004-4523-bf70-e3ebee06408e", + "metadata": {}, + "outputs": [], + 
"source": [ + "domain_client.api" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b2195bbd-5ef1-4a53-8886-1b2ea6854bc3", + "metadata": {}, + "outputs": [], + "source": [ + "connect_client = connect(\n", + " dht_key=\"VLD0:OBeFkuuQz6LIofeIIzC5Y-zwR96NoKqbojqGCcNKu8c\",\n", + " vld_forward_proxy=\"http://localhost:4000\",\n", + " vld_reverse_proxy=\"http://proxy\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "71261091-1cfc-428f-9087-7f24395a2750", + "metadata": {}, + "outputs": [], + "source": [ + "domain_client = sy.login(\n", + " dht_key=\"VLD0:OBeFkuuQz6LIofeIIzC5Y-zwR96NoKqbojqGCcNKu8c\",\n", + " vld_forward_proxy=\"http://localhost:4000\",\n", + " vld_reverse_proxy=\"http://proxy\",\n", + " email=\"info@openmined.org\",\n", + " password=\"changethis\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ec181b37-71cc-411b-8b6c-0f149e45c79c", + "metadata": {}, + "outputs": [], + "source": [ + "domain_client.api" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "293b55c9-9f9b-4702-b74f-6dfe9b5eee8d", + "metadata": {}, + "outputs": [], + "source": [ + "# syft absolute\n", + "import syft as sy" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2f469470-6280-466f-85e3-ed655484178e", + "metadata": {}, + "outputs": [], + "source": [ + "domain_client = sy.login_as_guest(port=8080)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "141a0871-d322-4508-b0b1-68ad1654dcda", + "metadata": {}, + "outputs": [], + "source": [ + "res = sy.serialize(domain_client.api, to_bytes=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "61192beb-a4f7-495f-adf5-f2294ec5a199", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "af269af3-f55b-4f3d-8cc1-cbe8ee10d327", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", 
+ "execution_count": null, + "id": "95facdab-92ab-42cf-b976-a9b646ae2901", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8de7d433-c26b-43e9-9a45-d960cfb18645", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7c7a97e3-9585-485f-ad41-2982bf935564", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "eb073a52-1c7a-4c02-bce3-0782c6f89064", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "98f58488-e927-4e44-a885-04740f8c8b31", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5adb6185-9f49-444c-ae26-702e17bcfabf", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6ed88528-1e23-4585-89ca-0e3cfa098d37", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "985f6211-efa8-4850-b2fa-280b064032ff", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3f3abeb1-228c-45ff-acc9-fbc2314c6e31", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "83aee788-4a14-4e41-b924-53dcbebe8e14", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0d2d9fa5-9098-4d79-a35e-2da46f615ef7", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "markdown", + "id": "18613355-f3bd-45c3-8ac3-97165dd6e28d", + "metadata": {}, + "source": [ + "## Debugging" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f2d4a8ea-f9e5-4411-bf68-0d4ed25f3fa6", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + 
"cell_type": "code", + "execution_count": null, + "id": "77f7d4b4-7ea2-4a61-8a67-a2dacbfd054f", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9bf0aa58-b6a1-463a-8d14-76f74dcc6d7c", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "raw", + "id": "1142383d-82df-49f5-ad5f-ede5fde39b20", + "metadata": {}, + "source": [ + "import lzma" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c8026971-b496-4a24-b84f-b57d898f15d9", + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "import lzma" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "69542e59-2ba3-4721-8c39-192258180114", + "metadata": {}, + "outputs": [], + "source": [ + "len(res)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "61f3fb0e-50e1-4cca-94cf-490e5bde974b", + "metadata": {}, + "outputs": [], + "source": [ + "comp = lzma.compress(res)\n", + "print(len(comp))\n", + "decom = lzma.decompress(comp)\n", + "print(len(decom))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ee53df6e-e979-4011-8fe7-24141f7df001", + "metadata": {}, + "outputs": [], + "source": [ + "# third party\n", + "from pympler import asizeof" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8db1d8a9-ee4b-4efa-a69b-1d735ceaf129", + "metadata": {}, + "outputs": [], + "source": [ + "asizeof.asizeof(domain_client.api)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f3bdfb82-687e-49a7-a268-2bb0e74364cc", + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "import sys\n", + "\n", + "# third party\n", + "from pympler import asizeof" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "41944d4d-7613-461e-a6e7-905514bb08da", + "metadata": {}, + "outputs": [], + "source": [ + "for attr_name, attr_value in domain_client.api.__dict__.items():\n", + " if attr_name != 
\"refresh_api_callback\":\n", + " res = sy.serialize(attr_value, to_bytes=True)\n", + " immediate_size = sys.getsizeof(res)\n", + " total_size = asizeof.asizeof(res)\n", + " print(\n", + " f\"{attr_name}: immediate size = {immediate_size} bytes, total size = {total_size} bytes\"\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a1d4ad18-7fb0-4ec7-966d-cf86a6b280f1", + "metadata": {}, + "outputs": [], + "source": [ + "count = 0\n", + "for i in domain_client.api.lib_endpoints.values():\n", + " count += 1\n", + " print(count, \" \", i.module_path)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "59ad85c9-6acb-4fbd-b9e7-25a0e34d8f6c", + "metadata": {}, + "outputs": [], + "source": [ + "len(res)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8ee3d56b-298e-4706-9e93-055960f41654", + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "import zlib" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d4509185-ba56-42d4-aaf3-84341cdeaa52", + "metadata": {}, + "outputs": [], + "source": [ + "%%time\n", + "c = zlib.compress(res)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b9b7539e-06ce-4a92-bf8e-6a65331f3ee1", + "metadata": {}, + "outputs": [], + "source": [ + "len(c)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1eb8fc1d-1d8a-4301-bd36-618393e6ff8a", + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "import lzma" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d8b9cabe-382d-4085-861d-ca55d99a938e", + "metadata": {}, + "outputs": [], + "source": [ + "%%time\n", + "lc = lzma.compress(res)\n", + "print(len(lc))\n", + "ld = lzma.decompress(lc)\n", + "print(len(ld))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4a8462ce-6de8-472b-8685-72665f36f940", + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "import gzip" + ] + }, + { + 
"cell_type": "code", + "execution_count": null, + "id": "9f357c7d-059d-46b5-bf03-c8acb5a3e7df", + "metadata": {}, + "outputs": [], + "source": [ + "%%time\n", + "c2 = gzip.compress(res)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9b4647a5-ec95-4f22-9ac2-104f30600cf5", + "metadata": {}, + "outputs": [], + "source": [ + "len(sy.serialize(domain_client.api.endpoints, to_bytes=True))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7d7b89fe-b270-40c0-bc18-066f9be62569", + "metadata": {}, + "outputs": [], + "source": [ + "# res = veilid_conn.get_node_metadata(credentials=None)\n", + "res = b\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4f6a98cf-ad5b-4ad0-87c7-b8cdc7d0678d", + "metadata": {}, + "outputs": [], + "source": [ + "res" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3ab82cd6-c080-46dd-b15d-da0c904e967e", + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "import json" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cba2d15b-826d-4f6b-82d1-bb70ba0e439d", + "metadata": {}, + "outputs": [], + "source": [ + "type(json.loads(res))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cbfda25f-5b2e-4c55-a906-1ca78497623f", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.5" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/Testing/Veilid/Veilid-Gateway-Testing.ipynb b/notebooks/Testing/Veilid/Veilid-Gateway-Testing.ipynb new file mode 100644 index 00000000000..16f5e7abe41 --- /dev/null +++ 
b/notebooks/Testing/Veilid/Veilid-Gateway-Testing.ipynb @@ -0,0 +1,226 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "a300f01b-8357-43ca-9c64-c489839603e8", + "metadata": {}, + "outputs": [], + "source": [ + "# syft absolute\n", + "import syft as sy" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9aeed160-94d3-49c1-98c5-7795c6df7280", + "metadata": {}, + "outputs": [], + "source": [ + "domain_client = sy.login(email=\"info@openmined.org\", password=\"changethis\", port=9082)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e7a79ee9-68bf-4a93-935e-32f42e332f97", + "metadata": {}, + "outputs": [], + "source": [ + "gateway_client = sy.login(email=\"info@openmined.org\", password=\"changethis\", port=9081)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b2d66293-b573-4cdf-8721-9d91a620dd9d", + "metadata": {}, + "outputs": [], + "source": [ + "domain_client.api.services.veilid.generate_dht_key()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e3b10d70-1c30-42e2-98bd-86af6a228455", + "metadata": {}, + "outputs": [], + "source": [ + "gateway_client.api.services.veilid.generate_dht_key()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7500007e-e5f6-4c4a-bbc3-46f2357d2433", + "metadata": {}, + "outputs": [], + "source": [ + "domain_route = domain_client.api.services.veilid.get_veilid_route()\n", + "gateway_route = gateway_client.api.services.veilid.get_veilid_route()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "82bee827-ea59-4255-9c32-5b9e10e5676f", + "metadata": {}, + "outputs": [], + "source": [ + "gateway_route.dht_key" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "960f6b4c-3073-45ec-93cf-54c384262d0b", + "metadata": {}, + "outputs": [], + "source": [ + "domain_route.dht_key" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": 
"e3e916e7-2897-4d63-b8b8-a913a2baed8a", + "metadata": {}, + "outputs": [], + "source": [ + "domain_client.connect_to_gateway(gateway_client, protocol=\"veilid\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ee4b39c1-01d5-4cae-9115-a0d83667c31a", + "metadata": {}, + "outputs": [], + "source": [ + "domain_client.peers[0].node_routes[0].dht_key" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6c56a7d4-88dc-43e0-b092-4c443734e3c3", + "metadata": {}, + "outputs": [], + "source": [ + "gateway_client.api.services.network.get_all_peers()[0].node_routes[0].dht_key" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8febe455-4b82-478f-85b5-d1e2e104fb1a", + "metadata": {}, + "outputs": [], + "source": [ + "gateway_client.peers" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9f6871cb-37bf-4570-94cd-b993906c11f8", + "metadata": {}, + "outputs": [], + "source": [ + "domain_peer = gateway_client.api.services.network.get_all_peers()[0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "91c303f5-36af-4a65-a81a-7cb24f5c3494", + "metadata": {}, + "outputs": [], + "source": [ + "connection = gateway_client.connection.with_proxy(domain_peer.id)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5b744210-dddb-4a20-a32e-146b0a92678c", + "metadata": {}, + "outputs": [], + "source": [ + "# syft absolute\n", + "from syft.node.credentials import SyftSigningKey" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "66b4f4c5-780d-4259-8360-2692ade1358f", + "metadata": {}, + "outputs": [], + "source": [ + "metadata = connection.get_node_metadata(credentials=SyftSigningKey.generate())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "508e9374-37ca-412b-af34-631994f80ff7", + "metadata": {}, + "outputs": [], + "source": [ + "proxy_client = gateway_client.domains[0]" + ] + }, + { + "cell_type": "code", + 
"execution_count": null, + "id": "2081964a-12da-428d-b543-7ba1a4c82600", + "metadata": {}, + "outputs": [], + "source": [ + "admin_client = proxy_client.login(email=\"info@openmined.org\", password=\"changethis\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "68a6e4bb-d6f6-4173-a8bb-dc70ea52c0b5", + "metadata": {}, + "outputs": [], + "source": [ + "admin_client" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "760f17f0-b44c-4e71-ae93-ba9f4c291fd9", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.5" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/packages/grid/default.env b/packages/grid/default.env index 7ba01f7a770..f5b42ff6323 100644 --- a/packages/grid/default.env +++ b/packages/grid/default.env @@ -115,3 +115,6 @@ OBLV_LOCALHOST_PORT=3030 # Registation ENABLE_SIGNUP=False + +# Veilid +DOCKER_IMAGE_VEILID=openmined/grid-veilid diff --git a/packages/grid/devspace.yaml b/packages/grid/devspace.yaml index ac81a14cccc..3fbe3c4c8f8 100644 --- a/packages/grid/devspace.yaml +++ b/packages/grid/devspace.yaml @@ -10,8 +10,8 @@ pipelines: purge_deployments --all run_dependencies --all # 1. Deploy any projects this project needs (see "dependencies") ensure_pull_secrets --all # 2. Ensure pull secrets - create_deployments --all # 3. Deploy Helm charts and manifests specfied as "deployments" build_images --all + create_deployments --all # 3. Deploy Helm charts and manifests specfied as "deployments" start_dev --all # 4. 
Start dev mode "app" (see "dev" section) # You can run this pipeline via `devspace deploy` (or `devspace run-pipeline deploy`) deploy: @@ -55,6 +55,13 @@ images: context: ./seaweedfs tags: - dev-${DEVSPACE_TIMESTAMP} + veilid: + image: "${CONTAINER_REGISTRY}/${DOCKER_IMAGE_VEILID}" + buildKit: {} + dockerfile: ./veilid/veilid.dockerfile + context: ./veilid + tags: + - dev-${DEVSPACE_TIMESTAMP} # This is a list of `deployments` that DevSpace can create for this project deployments: @@ -75,6 +82,8 @@ deployments: rootEmail: info@openmined.org defaultWorkerPoolCount: 1 resourcesPreset: micro + veilid: + enabled: true dev: mongo: @@ -109,6 +118,16 @@ dev: - path: ./backend/grid:/root/app/grid - path: ../syft:/root/app/syft ssh: {} + veilid: + labelSelector: + app.kubernetes.io/name: syft + app.kubernetes.io/component: veilid + env: + - name: DEV_MODE + value: "True" + logs: {} + sync: + - path: ./veilid/server:/app/server profiles: - name: gateway @@ -116,3 +135,7 @@ profiles: - op: replace path: deployments.syft.helm.values.node.type value: "gateway" + - op: remove + path: images.seaweedfs + - op: remove + path: dev.seaweedfs diff --git a/packages/grid/helm/syft/templates/backend/backend-statefulset.yaml b/packages/grid/helm/syft/templates/backend/backend-statefulset.yaml index 3673312d922..a0c6a665dbd 100644 --- a/packages/grid/helm/syft/templates/backend/backend-statefulset.yaml +++ b/packages/grid/helm/syft/templates/backend/backend-statefulset.yaml @@ -121,6 +121,11 @@ spec: - name: OBLV_ENABLED value: {{ .Values.node.oblv.enabled | quote }} {{- end }} + # Veilid + {{- if .Values.veilid.enabled }} + - name: VEILID_ENABLED + value: {{ .Values.veilid.enabled | quote }} + {{- end }} {{- if .Values.node.env }} {{- toYaml .Values.node.env | nindent 12 }} {{- end }} diff --git a/packages/grid/helm/syft/templates/veilid/veilid-deployment.yaml b/packages/grid/helm/syft/templates/veilid/veilid-deployment.yaml new file mode 100644 index 00000000000..1b05569837a --- 
/dev/null +++ b/packages/grid/helm/syft/templates/veilid/veilid-deployment.yaml @@ -0,0 +1,59 @@ +{{- if .Values.veilid.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: veilid + labels: + {{- include "common.labels" . | nindent 4 }} + app.kubernetes.io/component: veilid +spec: + replicas: 1 + selector: + matchLabels: + {{- include "common.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: veilid + strategy: + type: Recreate + template: + metadata: + labels: + {{- include "common.labels" . | nindent 8 }} + app.kubernetes.io/component: veilid + spec: + containers: + - name: veilid-container + image: {{ .Values.global.registry }}/openmined/grid-veilid:{{ .Values.global.version }} + imagePullPolicy: Always + resources: {{ include "common.resources.set" (dict "resources" .Values.veilid.resources "preset" .Values.veilid.resourcesPreset) | nindent 12 }} + + env: + - name: VEILID_FLAGS + value: {{ .Values.veilid.serverFlags | quote }} + - name: UVICORN_LOG_LEVEL + value: {{ .Values.veilid.uvicornLogLevel }} + - name: APP_LOG_LEVEL + value: {{ .Values.veilid.appLogLevel }} + {{- if .Values.veilid.env }} + {{- toYaml .Values.veilid.env | nindent 12 }} + {{- end }} + + ports: + - name: veilid-api + containerPort: 4000 + startupProbe: + httpGet: + path: /healthcheck?probe=startupProbe + port: veilid-api + failureThreshold: 30 + periodSeconds: 10 + livenessProbe: + httpGet: + path: /healthcheck?probe=livenessProbe + port: veilid-api + periodSeconds: 15 + timeoutSeconds: 5 + failureThreshold: 3 + readinessProbe: null + terminationGracePeriodSeconds: 5 + +{{ end }} \ No newline at end of file diff --git a/packages/grid/helm/syft/templates/veilid/veilid-service.yaml b/packages/grid/helm/syft/templates/veilid/veilid-service.yaml new file mode 100644 index 00000000000..4b71381b9cc --- /dev/null +++ b/packages/grid/helm/syft/templates/veilid/veilid-service.yaml @@ -0,0 +1,19 @@ +{{- if .Values.veilid.enabled }} +apiVersion: v1 +kind: Service 
+metadata: + name: veilid + labels: + {{- include "common.labels" . | nindent 4 }} + app.kubernetes.io/component: veilid +spec: + type: ClusterIP + selector: + {{- include "common.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: veilid + ports: + - name: python-server + port: 80 + protocol: TCP + targetPort: 4000 +{{ end }} \ No newline at end of file diff --git a/packages/grid/helm/syft/values.yaml b/packages/grid/helm/syft/values.yaml index 59af4023e30..1a0db271d48 100644 --- a/packages/grid/helm/syft/values.yaml +++ b/packages/grid/helm/syft/values.yaml @@ -145,3 +145,20 @@ ingress: # ---------------------------------------- class: null className: null + +# ---------------------------------------- +# For Veilid Core Debug Logs +# serverFlags: "--debug" +# ---------------------------------------- +veilid: + enabled: false + serverFlags: "" + appLogLevel: "info" + uvicornLogLevel: "info" + + # Extra environment vars + env: null + + # Pod Resource Limits + resourcesPreset: nano + resources: null \ No newline at end of file diff --git a/packages/grid/veilid/development.md b/packages/grid/veilid/development.md new file mode 100644 index 00000000000..f6baa053ac7 --- /dev/null +++ b/packages/grid/veilid/development.md @@ -0,0 +1,67 @@ +## Veilid - Development Instructions + +### 1. Building Veilid Container + +```sh +cd packages/grid/veilid && docker build -f veilid.dockerfile -t veilid:0.1 . +``` + +### Running veilid Container + +#### 1. Development Mode + +```sh +cd packages/grid/veilid && \ +docker run --rm -e DEV_MODE=True -p 4000:4000 -p 5959:5959 -p 5959:5959/udp -v $(pwd)/server:/app/server veilid:0.1 +``` + +##### 2. Additional Flags for Development + +``` +a. VEILID_FLAGS="--debug" (For Veilid Debug logs) +b. APP_LOG_LEVEL="debug" (For changing logging method inside the application could be info, debug, warning, critical) +c. UVICORN_LOG_LEVEL="debug" (For setting logging method for uvicorn) +``` + +#### 3. 
Production Mode + +```sh +cd packages/grid/veilid && \ +docker run --rm -p 4000:4000 -p 5959:5959 -p 5959:5959/udp veilid:0.1 +``` + +### Kubernetes Development + +#### 1. Gateway Node + +##### Creation + +```sh +bash -c '\ + export CLUSTER_NAME=testgateway1 CLUSTER_HTTP_PORT=9081 DEVSPACE_PROFILE=gateway && \ + tox -e dev.k8s.start && \ + tox -e dev.k8s.hotreload' +``` + +##### Deletion + +```sh +bash -c "CLUSTER_NAME=testgateway1 tox -e dev.k8s.destroy || true" +``` + +#### 2. Domain Node + +##### Creation + +```sh +bash -c '\ + export CLUSTER_NAME=testdomain1 CLUSTER_HTTP_PORT=9082 && \ + tox -e dev.k8s.start && \ + tox -e dev.k8s.hotreload' +``` + +##### Deletion + +```sh +bash -c "CLUSTER_NAME=testdomain1 tox -e dev.k8s.destroy || true" +``` diff --git a/packages/grid/veilid/requirements.txt b/packages/grid/veilid/requirements.txt index 4540e75958c..4d83d470465 100644 --- a/packages/grid/veilid/requirements.txt +++ b/packages/grid/veilid/requirements.txt @@ -1 +1,4 @@ -veilid==0.2.5 +fastapi==0.103.2 +httpx==0.27.0 +loguru==0.7.2 +uvicorn[standard]==0.24.0.post1 diff --git a/packages/grid/veilid/server/constants.py b/packages/grid/veilid/server/constants.py new file mode 100644 index 00000000000..0714b9e0902 --- /dev/null +++ b/packages/grid/veilid/server/constants.py @@ -0,0 +1,11 @@ +HOST = "localhost" +PORT = 5959 +# name of the Table Database +TABLE_DB_KEY = "syft-table-db" +# name of the DHT Key in the table Database +DHT_KEY = "syft-dht-key" +# name of the DHT Key Credentials in the table Database +# Credentials refer to the Public and Private Key created for the DHT Key +DHT_KEY_CREDS = "syft-dht-key-creds" + +USE_DIRECT_CONNECTION = True diff --git a/packages/grid/veilid/server/main.py b/packages/grid/veilid/server/main.py new file mode 100644 index 00000000000..f28389414f8 --- /dev/null +++ b/packages/grid/veilid/server/main.py @@ -0,0 +1,115 @@ +# stdlib +import json +import lzma +import os +import sys +from typing import Annotated + +# third party 
+from fastapi import Body +from fastapi import FastAPI +from fastapi import HTTPException +from fastapi import Request +from fastapi import Response +from loguru import logger + +# relative +from .models import ResponseModel +from .veilid_core import VeilidConnectionSingleton +from .veilid_core import app_call +from .veilid_core import app_message +from .veilid_core import generate_dht_key +from .veilid_core import healthcheck +from .veilid_core import retrieve_dht_key + +# Logging Configuration +log_level = os.getenv("APP_LOG_LEVEL", "INFO").upper() +logger.remove() +logger.add(sys.stderr, colorize=True, level=log_level) + +app = FastAPI(title="Veilid") +veilid_conn = VeilidConnectionSingleton() + + +@app.get("/", response_model=ResponseModel) +async def read_root() -> ResponseModel: + return ResponseModel(message="Veilid has started") + + +@app.get("/healthcheck", response_model=ResponseModel) +async def healthcheck_endpoint() -> ResponseModel: + res = await healthcheck() + if res: + return ResponseModel(message="OK") + else: + return ResponseModel(message="FAIL") + + +@app.post("/generate_dht_key", response_model=ResponseModel) +async def generate_dht_key_endpoint() -> ResponseModel: + try: + res = await generate_dht_key() + return ResponseModel(message=res) + except Exception as e: + raise HTTPException(status_code=500, detail=f"Failed to generate DHT key: {e}") + + +@app.get("/retrieve_dht_key", response_model=ResponseModel) +async def retrieve_dht_key_endpoint() -> ResponseModel: + try: + res = await retrieve_dht_key() + return ResponseModel(message=res) + except Exception as e: + raise HTTPException(status_code=500, detail=str(e)) + + +@app.post("/app_message", response_model=ResponseModel) +async def app_message_endpoint( + request: Request, dht_key: Annotated[str, Body()], message: Annotated[bytes, Body()] +) -> ResponseModel: + try: + res = await app_message(dht_key=dht_key, message=message) + return ResponseModel(message=res) + except Exception as e: + 
raise HTTPException(status_code=500, detail=str(e)) + + +@app.post("/app_call") +async def app_call_endpoint( + request: Request, dht_key: Annotated[str, Body()], message: Annotated[bytes, Body()] +) -> Response: + try: + res = await app_call(dht_key=dht_key, message=message) + return Response(res, media_type="application/octet-stream") + except Exception as e: + raise HTTPException(status_code=500, detail=str(e)) + + +@app.api_route("/proxy", methods=["GET", "POST", "PUT"]) +async def proxy(request: Request) -> Response: + logger.info("Proxying request") + + request_data = await request.json() + logger.info(f"Request URL: {request_data}") + + dht_key = request_data.get("dht_key") + request_data.pop("dht_key") + message = json.dumps(request_data).encode() + + res = await app_call(dht_key=dht_key, message=message) + decompressed_res = lzma.decompress(res) + return Response(decompressed_res, media_type="application/octet-stream") + + +@app.on_event("startup") +async def startup_event() -> None: + try: + await veilid_conn.initialize_connection() + except Exception as e: + logger.exception(f"Failed to connect to Veilid: {e}") + raise e + + +@app.on_event("shutdown") +async def shutdown_event() -> None: + await veilid_conn.release_connection() diff --git a/packages/grid/veilid/server/models.py b/packages/grid/veilid/server/models.py new file mode 100644 index 00000000000..95ae93c0f93 --- /dev/null +++ b/packages/grid/veilid/server/models.py @@ -0,0 +1,6 @@ +# third party +from pydantic import BaseModel + + +class ResponseModel(BaseModel): + message: str diff --git a/packages/grid/veilid/server/veilid_core.py b/packages/grid/veilid/server/veilid_core.py new file mode 100644 index 00000000000..a611449bd6c --- /dev/null +++ b/packages/grid/veilid/server/veilid_core.py @@ -0,0 +1,237 @@ +# stdlib +import base64 +from collections.abc import Callable +import json +import lzma + +# third party +import httpx +from loguru import logger +import veilid +from veilid import KeyPair 
+from veilid import Sequencing +from veilid import Stability +from veilid import TypedKey +from veilid import ValueData +from veilid import VeilidUpdate +from veilid.json_api import _JsonRoutingContext +from veilid.json_api import _JsonVeilidAPI +from veilid.types import RouteId + +# relative +from .constants import HOST +from .constants import PORT +from .constants import USE_DIRECT_CONNECTION +from .veilid_db import load_dht_key +from .veilid_db import store_dht_key +from .veilid_db import store_dht_key_creds + + +async def main_callback(update: VeilidUpdate) -> None: + # TODO: Handle other types of network events like + # when our private route goes + if update.kind == veilid.VeilidUpdateKind.APP_MESSAGE: + logger.info(f"Received App Message: {update.detail.message}") + + elif update.kind == veilid.VeilidUpdateKind.APP_CALL: + logger.info(f"Received App Call: {update.detail.message}") + message: dict = json.loads(update.detail.message) + + async with httpx.AsyncClient() as client: + data = message.get("data", None) + # TODO: can we optimize this? 
+ # We encode the data to base64,as while sending + # json expects valid utf-8 strings + if data: + message["data"] = base64.b64decode(data) + response = await client.request( + method=message.get("method"), + url=message.get("url"), + data=message.get("data", None), + params=message.get("params", None), + json=message.get("json", None), + ) + + async with await get_veilid_conn() as conn: + compressed_response = lzma.compress(response.content) + logger.info(f"Compression response size: {len(compressed_response)}") + await conn.app_call_reply(update.detail.call_id, compressed_response) + + +async def noop_callback(update: VeilidUpdate) -> None: + pass + + +async def get_veilid_conn( + host: str = HOST, port: int = PORT, update_callback: Callable = noop_callback +) -> _JsonVeilidAPI: + return await veilid.json_api_connect( + host=host, port=port, update_callback=update_callback + ) + + +async def get_routing_context(conn: _JsonVeilidAPI) -> _JsonRoutingContext: + if USE_DIRECT_CONNECTION: + return await (await conn.new_routing_context()).with_safety( + veilid.SafetySelection.unsafe(veilid.Sequencing.ENSURE_ORDERED) + ) + else: + return await (await conn.new_routing_context()).with_sequencing( + veilid.Sequencing.ENSURE_ORDERED + ) + + +class VeilidConnectionSingleton: + _instance = None + + def __new__(cls) -> "VeilidConnectionSingleton": + if cls._instance is None: + cls._instance = super().__new__(cls) + cls._instance._connection = None + return cls._instance + + def __init__(self) -> None: + self._connection: _JsonVeilidAPI | None = None + + @property + def connection(self) -> _JsonVeilidAPI | None: + return self._connection + + async def initialize_connection(self) -> None: + if self._connection is None: + self._connection = await get_veilid_conn(update_callback=main_callback) + logger.info("Connected to Veilid") + + async def release_connection(self) -> None: + if self._connection is not None: + await self._connection.release() + logger.info("Disconnected from 
Veilid") + self._connection = None + + +async def create_private_route( + conn: _JsonVeilidAPI, + stability: Stability = veilid.Stability.RELIABLE, + sequencing: Sequencing = veilid.Sequencing.ENSURE_ORDERED, +) -> tuple[RouteId, bytes]: + route_id, route_blob = await conn.new_custom_private_route( + [veilid.CryptoKind.CRYPTO_KIND_VLD0], + stability=stability, + sequencing=sequencing, + ) + logger.info(f"Private Route created with Route ID: {route_id}") + return (route_id, route_blob) + + +async def get_node_id(conn: _JsonVeilidAPI) -> str: + state = await conn.get_state() + config = state.config.config + node_id = config.network.routing_table.node_id[0] + return node_id + + +async def generate_dht_key() -> str: + logger.info("Generating DHT Key") + + async with await get_veilid_conn() as conn: + if await load_dht_key(conn): + return "DHT Key already exists" + + async with await get_routing_context(conn) as router: + dht_record = await router.create_dht_record(veilid.DHTSchema.dflt(1)) + + if USE_DIRECT_CONNECTION: + node_id = await get_node_id(conn) + await router.set_dht_value(dht_record.key, 0, node_id.encode()) + else: + _, route_blob = await create_private_route(conn) + await router.set_dht_value(dht_record.key, 0, route_blob) + + await router.close_dht_record(dht_record.key) + + keypair = KeyPair.from_parts( + key=dht_record.owner, secret=dht_record.owner_secret + ) + + await store_dht_key(conn, dht_record.key) + await store_dht_key_creds(conn, keypair) + + return "DHT Key generated successfully" + + +async def retrieve_dht_key() -> str: + async with await get_veilid_conn() as conn: + dht_key = await load_dht_key(conn) + + if dht_key is None: + raise Exception("DHT Key does not exist. 
Please generate one.") + return str(dht_key) + + +async def get_dht_value( + router: _JsonRoutingContext, + dht_key: TypedKey, + subkey: int, + force_refresh: bool = True, +) -> ValueData: + try: + await router.open_dht_record(key=dht_key, writer=None) + except Exception as e: + raise Exception(f"Unable to open DHT Record:{dht_key} . Exception: {e}") + + try: + dht_value = await router.get_dht_value( + key=dht_key, subkey=subkey, force_refresh=force_refresh + ) + # NOTE: Always close the DHT record after reading the value + await router.close_dht_record(dht_key) + return dht_value + except Exception as e: + raise Exception( + f"Unable to get subkey value:{subkey} from DHT Record:{dht_key}. Exception: {e}" + ) + + +# TODO: change verbosity of logs to debug at appropriate places +async def get_route_from_dht_record( + dht_key: str, conn: _JsonVeilidAPI, router: _JsonRoutingContext +) -> str | RouteId: + dht_key = veilid.TypedKey(dht_key) + logger.info(f"App Call to DHT Key: {dht_key}") + dht_value = await get_dht_value(router, dht_key, 0) + logger.info(f"DHT Value:{dht_value}") + + if USE_DIRECT_CONNECTION: + route = dht_value.data.decode() + logger.info(f"Node ID: {route}") + else: + route = await conn.import_remote_private_route(dht_value.data) + logger.info(f"Private Route of Peer: {route} ") + + return route + + +async def app_message(dht_key: str, message: bytes) -> str: + async with await get_veilid_conn() as conn: + async with await get_routing_context(conn) as router: + route = await get_route_from_dht_record(dht_key, conn, router) + + await router.app_message(route, message) + + return "Message sent successfully" + + +async def app_call(dht_key: str, message: bytes) -> bytes: + async with await get_veilid_conn() as conn: + async with await get_routing_context(conn) as router: + route = await get_route_from_dht_record(dht_key, conn, router) + + result = await router.app_call(route, message) + + return result + + +async def healthcheck() -> bool: + async with 
await get_veilid_conn() as conn: + state = await conn.get_state() + return state.network.started diff --git a/packages/grid/veilid/server/veilid_db.py b/packages/grid/veilid/server/veilid_db.py new file mode 100644 index 00000000000..bb295910fd2 --- /dev/null +++ b/packages/grid/veilid/server/veilid_db.py @@ -0,0 +1,54 @@ +# Contains all the database related functions for the Veilid server +# stdlib + +# third party +from veilid import KeyPair +from veilid import TypedKey +from veilid.json_api import _JsonVeilidAPI + +# relative +from .constants import DHT_KEY +from .constants import DHT_KEY_CREDS +from .constants import TABLE_DB_KEY + + +async def load_key(conn: _JsonVeilidAPI, key: str) -> str | None: + tdb = await conn.open_table_db(TABLE_DB_KEY, 1) + + async with tdb: + key_bytes = key.encode() + value = await tdb.load(key_bytes) + if value is None: + return None + return value.decode() + + +async def store_key(conn: _JsonVeilidAPI, key: str, value: str) -> None: + tdb = await conn.open_table_db(TABLE_DB_KEY, 1) + + async with tdb: + key_bytes = key.encode() + value_bytes = value.encode() + await tdb.store(key_bytes, value_bytes) + + +async def load_dht_key(conn: _JsonVeilidAPI) -> TypedKey | None: + value = await load_key(conn, DHT_KEY) + if value is None: + return None + return TypedKey(value) + + +async def load_dht_key_creds(conn: _JsonVeilidAPI) -> KeyPair | None: + value = await load_key(conn, DHT_KEY_CREDS) + if value is None: + return None + return KeyPair(value) + + +async def store_dht_key(conn: _JsonVeilidAPI, keypair: TypedKey) -> None: + await store_key(conn, DHT_KEY, str(keypair)) + + +async def store_dht_key_creds(conn: _JsonVeilidAPI, keypair: KeyPair) -> None: + await store_key(conn, DHT_KEY_CREDS, str(keypair)) diff --git a/packages/grid/veilid/start.sh b/packages/grid/veilid/start.sh index a11d10a131e..86572d98e66 100644 --- a/packages/grid/veilid/start.sh +++ b/packages/grid/veilid/start.sh @@ -1,4 +1,21 @@ #!/usr/bin/env bash +set -e 
+export PATH="/root/.local/bin:${PATH}" -/veilid/veilid-server -c /veilid/veilid-server.conf --debug +APP_MODULE=server.main:app +APP_LOG_LEVEL=${APP_LOG_LEVEL:-info} +UVICORN_LOG_LEVEL=${UVICORN_LOG_LEVEL:-info} +HOST=${HOST:-0.0.0.0} +PORT=${PORT:-4000} +RELOAD="" +VEILID_FLAGS=${VEILID_FLAGS:-""} +if [[ ${DEV_MODE} == "True" ]]; +then + echo "DEV_MODE Enabled" + RELOAD="--reload" +fi + +/veilid/veilid-server -c /veilid/veilid-server.conf $VEILID_FLAGS & + +exec uvicorn $RELOAD --host $HOST --port $PORT --log-level $UVICORN_LOG_LEVEL "$APP_MODULE" \ No newline at end of file diff --git a/packages/grid/veilid/veilid-server.conf b/packages/grid/veilid/veilid-server.conf index bae004ab415..11ff999e74f 100644 --- a/packages/grid/veilid/veilid-server.conf +++ b/packages/grid/veilid/veilid-server.conf @@ -3,3 +3,7 @@ daemon: client_api: enabled: true listen_address: ':5959' +core: + network: + rpc: + timeout_ms: 10000 diff --git a/packages/grid/veilid/veilid.dockerfile b/packages/grid/veilid/veilid.dockerfile index 576a6a1c2ad..baa168b9b12 100644 --- a/packages/grid/veilid/veilid.dockerfile +++ b/packages/grid/veilid/veilid.dockerfile @@ -1,8 +1,14 @@ -# ======== [Stage 1] Build Veilid Server ========== # +ARG VEILID_VERSION="0.2.5" +ARG PYTHON_VERSION="3.12" +# ======== [Stage 1] Build Veilid Server ========== # +# TODO: Switch from building the packages to using the pre-built packages +# from debian or rpm. This will reduce the build time and the size of the +# final image. 
FROM rust as build +ARG VEILID_VERSION RUN apt update && apt install -y git -RUN git clone -b v0.2.5 https://gitlab.com/veilid/veilid +RUN git clone -b v${VEILID_VERSION} https://gitlab.com/veilid/veilid WORKDIR /veilid RUN bash -c "source scripts/earthly/install_capnproto.sh" RUN bash -c "source scripts/earthly/install_protoc.sh" @@ -10,15 +16,18 @@ RUN cd veilid-server && cargo build --release -p veilid-server # ========== [Stage 2] Dependency Install ========== # -FROM python:3.12-bookworm +FROM python:${PYTHON_VERSION}-bookworm +ARG VEILID_VERSION COPY --from=build /veilid/target/release/veilid-server /veilid/veilid-server WORKDIR /app COPY ./requirements.txt /app/requirements.txt RUN --mount=type=cache,target=/root/.cache \ - pip install --user -r requirements.txt + pip install --user -r requirements.txt && \ + pip install veilid==${VEILID_VERSION} + COPY ./start.sh /app/start.sh RUN chmod +x /app/start.sh -COPY ./veilid.py /app/veilid.py +COPY ./server /app/server COPY ./veilid-server.conf /veilid # ========== [Final] Start Veilid Server and Python Web Server ========== # diff --git a/packages/grid/veilid/veilid.py b/packages/grid/veilid/veilid.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/packages/syft/src/syft/client/client.py b/packages/syft/src/syft/client/client.py index bc6b2c60bb5..ce96e350fd5 100644 --- a/packages/syft/src/syft/client/client.py +++ b/packages/syft/src/syft/client/client.py @@ -2,6 +2,7 @@ from __future__ import annotations # stdlib +import base64 from collections.abc import Callable from copy import deepcopy from enum import Enum @@ -14,6 +15,7 @@ # third party from argon2 import PasswordHasher +from pydantic import Field from pydantic import field_validator import requests from requests import Response @@ -46,7 +48,11 @@ from ..service.user.user import UserView from ..service.user.user_roles import ServiceRole from ..service.user.user_service import UserService +from ..service.veilid.veilid_endpoints import 
VEILID_PROXY_PATH +from ..service.veilid.veilid_endpoints import VEILID_SERVICE_URL +from ..service.veilid.veilid_endpoints import VEILID_SYFT_PROXY_URL from ..types.grid_url import GridURL +from ..types.syft_object import SYFT_OBJECT_VERSION_1 from ..types.syft_object import SYFT_OBJECT_VERSION_2 from ..types.uid import UID from ..util.logger import debug @@ -61,6 +67,7 @@ from .api import SyftAPICall from .api import debox_signed_syftapicall_response from .connection import NodeConnection +from .protocol import SyftProtocol if TYPE_CHECKING: # relative @@ -323,6 +330,213 @@ def get_client_type(self) -> type[SyftClient]: return SyftError(message=f"Unknown node type {metadata.node_type}") +@serializable( + attrs=["proxy_target_uid", "dht_key", "vld_forward_proxy", "vld_reverse_proxy"] +) +class VeilidConnection(NodeConnection): + __canonical_name__ = "VeilidConnection" + __version__ = SYFT_OBJECT_VERSION_1 + + vld_forward_proxy: GridURL = Field(default=GridURL.from_url(VEILID_SERVICE_URL)) + vld_reverse_proxy: GridURL = Field(default=GridURL.from_url(VEILID_SYFT_PROXY_URL)) + dht_key: str + proxy_target_uid: UID | None = None + routes: type[Routes] = Field(default=Routes) + session_cache: Session | None = None + + @field_validator("vld_forward_proxy", mode="before") + def make_forward_proxy_url(cls, v: GridURL | str) -> GridURL: + if isinstance(v, str): + return GridURL.from_url(v) + else: + return v + + # TODO: Remove this once when we remove reverse proxy in Veilid Connection + @field_validator("vld_reverse_proxy", mode="before") + def make_reverse_proxy_url(cls, v: GridURL | str) -> GridURL: + if isinstance(v, str): + return GridURL.from_url(v) + else: + return v + + def with_proxy(self, proxy_target_uid: UID) -> Self: + raise NotImplementedError("VeilidConnection does not support with_proxy") + + def get_cache_key(self) -> str: + return str(self.dht_key) + + # def to_blob_route(self, path: str, **kwargs) -> GridURL: + # _path = 
self.routes.ROUTE_BLOB_STORE.value + path + # return self.url.with_path(_path) + + @property + def session(self) -> Session: + if self.session_cache is None: + session = requests.Session() + retry = Retry(total=3, backoff_factor=0.5) + adapter = HTTPAdapter(max_retries=retry) + session.mount("http://", adapter) + session.mount("https://", adapter) + self.session_cache = session + return self.session_cache + + def _make_get(self, path: str, params: dict | None = None) -> bytes: + rev_proxy_url = self.vld_reverse_proxy.with_path(path) + forward_proxy_url = self.vld_forward_proxy.with_path(VEILID_PROXY_PATH) + + json_data = { + "url": str(rev_proxy_url), + "method": "GET", + "dht_key": self.dht_key, + "params": params, + } + response = self.session.get(str(forward_proxy_url), json=json_data) + if response.status_code != 200: + raise requests.ConnectionError( + f"Failed to fetch {forward_proxy_url}. Response returned with code {response.status_code}" + ) + + return response.content + + def _make_post( + self, + path: str, + json: dict[str, Any] | None = None, + data: bytes | None = None, + ) -> bytes: + rev_proxy_url = self.vld_reverse_proxy.with_path(path) + forward_proxy_url = self.vld_forward_proxy.with_path(VEILID_PROXY_PATH) + + json_data = { + "url": str(rev_proxy_url), + "method": "POST", + "dht_key": self.dht_key, + "json": json, + "data": data, + } + + response = self.session.post(str(forward_proxy_url), json=json_data) + if response.status_code != 200: + raise requests.ConnectionError( + f"Failed to fetch {forward_proxy_url}. 
Response returned with code {response.status_code}" + ) + + return response.content + + def get_node_metadata(self, credentials: SyftSigningKey) -> NodeMetadataJSON: + # TODO: Implement message proxy forwarding for gateway + + response = self._make_get(self.routes.ROUTE_METADATA.value) + metadata_json = json.loads(response) + return NodeMetadataJSON(**metadata_json) + + def get_api( + self, credentials: SyftSigningKey, communication_protocol: int + ) -> SyftAPI: + # TODO: Implement message proxy forwarding for gateway + + params = { + "verify_key": str(credentials.verify_key), + "communication_protocol": communication_protocol, + } + content = self._make_get(self.routes.ROUTE_API.value, params=params) + obj = _deserialize(content, from_bytes=True) + obj.connection = self + obj.signing_key = credentials + obj.communication_protocol = communication_protocol + if self.proxy_target_uid: + obj.node_uid = self.proxy_target_uid + return cast(SyftAPI, obj) + + def login( + self, + email: str, + password: str, + ) -> SyftSigningKey | None: + # TODO: Implement message proxy forwarding for gateway + + credentials = {"email": email, "password": password} + response = self._make_post(self.routes.ROUTE_LOGIN.value, credentials) + obj = _deserialize(response, from_bytes=True) + + return obj + + def register(self, new_user: UserCreate) -> Any: + # TODO: Implement message proxy forwarding for gateway + + data = _serialize(new_user, to_bytes=True) + response = self._make_post(self.routes.ROUTE_REGISTER.value, data=data) + response = _deserialize(response, from_bytes=True) + return response + + def make_call(self, signed_call: SignedSyftAPICall) -> Any: + msg_bytes: bytes = _serialize(obj=signed_call, to_bytes=True) + # Since JSON expects strings, we need to encode the bytes to base64 + # as some bytes may not be valid utf-8 + # TODO: Can we optimize this? 
+ msg_base64 = base64.b64encode(msg_bytes).decode() + + rev_proxy_url = self.vld_reverse_proxy.with_path( + self.routes.ROUTE_API_CALL.value + ) + forward_proxy_url = self.vld_forward_proxy.with_path(VEILID_PROXY_PATH) + json_data = { + "url": str(rev_proxy_url), + "method": "POST", + "dht_key": self.dht_key, + "data": msg_base64, + } + response = requests.post( # nosec + url=str(forward_proxy_url), + json=json_data, + ) + + if response.status_code != 200: + raise requests.ConnectionError( + f"Failed to fetch metadata. Response returned with code {response.status_code}" + ) + + result = _deserialize(response.content, from_bytes=True) + return result + + def __repr__(self) -> str: + return self.__str__() + + def __str__(self) -> str: + res = f"{type(self).__name__}:" + res += f"\n DHT Key: {self.dht_key}" + res += f"\n Forward Proxy: {self.vld_forward_proxy}" + res += f"\n Reverse Proxy: {self.vld_reverse_proxy}" + return res + + def __hash__(self) -> int: + return ( + hash(self.proxy_target_uid) + + hash(self.dht_key) + + hash(self.vld_forward_proxy) + + hash(self.vld_reverse_proxy) + ) + + def get_client_type(self) -> type[SyftClient]: + # TODO: Rasswanth, should remove passing in credentials + # when metadata are proxy forwarded in the grid routes + # in the gateway fixes PR + # relative + from .domain_client import DomainClient + from .enclave_client import EnclaveClient + from .gateway_client import GatewayClient + + metadata = self.get_node_metadata(credentials=SyftSigningKey.generate()) + if metadata.node_type == NodeType.DOMAIN.value: + return DomainClient + elif metadata.node_type == NodeType.GATEWAY.value: + return GatewayClient + elif metadata.node_type == NodeType.ENCLAVE.value: + return EnclaveClient + else: + return SyftError(message=f"Unknown node type {metadata.node_type}") + + @serializable() class PythonConnection(NodeConnection): __canonical_name__ = "PythonConnection" @@ -646,19 +860,34 @@ def guest(self) -> Self: metadata=self.metadata, ) - def 
exchange_route(self, client: Self) -> SyftSuccess | SyftError: + def exchange_route( + self, client: Self, protocol: SyftProtocol = SyftProtocol.HTTP + ) -> SyftSuccess | SyftError: # relative from ..service.network.routes import connection_to_route - self_node_route = connection_to_route(self.connection) - remote_node_route = connection_to_route(client.connection) - if client.metadata is None: - return SyftError(f"client {client}'s metadata is None!") - result = self.api.services.network.exchange_credentials_with( - self_node_route=self_node_route, - remote_node_route=remote_node_route, - remote_node_verify_key=client.metadata.to(NodeMetadataV3).verify_key, - ) + if protocol == SyftProtocol.HTTP: + self_node_route = connection_to_route(self.connection) + remote_node_route = connection_to_route(client.connection) + if client.metadata is None: + return SyftError(f"client {client}'s metadata is None!") + + result = self.api.services.network.exchange_credentials_with( + self_node_route=self_node_route, + remote_node_route=remote_node_route, + remote_node_verify_key=client.metadata.to(NodeMetadataV3).verify_key, + ) + + elif protocol == SyftProtocol.VEILID: + remote_node_route = connection_to_route(client.connection) + + result = self.api.services.network.exchange_veilid_route( + remote_node_route=remote_node_route, + ) + else: + raise ValueError( + f"Invalid Route Exchange SyftProtocol: {protocol}.Supported protocols are {SyftProtocol.all()}" + ) return result @@ -729,6 +958,7 @@ def login( register: bool = False, **kwargs: Any, ) -> Self: + # TODO: Remove this Hack (Note to Rasswanth) # If SYFT_LOGIN_{NODE_NAME}_PASSWORD is set, use that as the password # for the login. 
This is useful for CI/CD environments to test password # randomization that is implemented by helm charts @@ -925,9 +1155,18 @@ def connect( url: str | GridURL = DEFAULT_PYGRID_ADDRESS, node: AbstractNode | None = None, port: int | None = None, + vld_forward_proxy: str | GridURL | None = None, + vld_reverse_proxy: str | GridURL | None = None, + dht_key: str | None = None, ) -> SyftClient: if node: connection = PythonConnection(node=node) + elif dht_key and vld_forward_proxy and vld_reverse_proxy: + connection = VeilidConnection( + vld_forward_proxy=vld_forward_proxy, + vld_reverse_proxy=vld_reverse_proxy, + dht_key=dht_key, + ) else: url = GridURL.from_url(url) if isinstance(port, int | str): @@ -964,12 +1203,25 @@ def register( @instrument def login_as_guest( + # HTTPConnection url: str | GridURL = DEFAULT_PYGRID_ADDRESS, - node: AbstractNode | None = None, port: int | None = None, + # PythonConnection + node: AbstractNode | None = None, + # Veilid Connection + vld_forward_proxy: str | GridURL | None = None, + vld_reverse_proxy: str | GridURL | None = None, + dht_key: str | None = None, verbose: bool = True, ) -> SyftClient: - _client = connect(url=url, node=node, port=port) + _client = connect( + url=url, + node=node, + port=port, + vld_forward_proxy=vld_forward_proxy, + vld_reverse_proxy=vld_reverse_proxy, + dht_key=dht_key, + ) if isinstance(_client, SyftError): return _client @@ -986,13 +1238,26 @@ def login_as_guest( @instrument def login( email: str, + # HTTPConnection url: str | GridURL = DEFAULT_PYGRID_ADDRESS, - node: AbstractNode | None = None, port: int | None = None, + # PythonConnection + node: AbstractNode | None = None, + # Veilid Connection + vld_forward_proxy: str | GridURL | None = None, + vld_reverse_proxy: str | GridURL | None = None, + dht_key: str | None = None, password: str | None = None, cache: bool = True, ) -> SyftClient: - _client = connect(url=url, node=node, port=port) + _client = connect( + url=url, + node=node, + port=port, + 
vld_forward_proxy=vld_forward_proxy, + vld_reverse_proxy=vld_reverse_proxy, + dht_key=dht_key, + ) if isinstance(_client, SyftError): return _client diff --git a/packages/syft/src/syft/client/domain_client.py b/packages/syft/src/syft/client/domain_client.py index 7f6f8555d22..cd25acf7150 100644 --- a/packages/syft/src/syft/client/domain_client.py +++ b/packages/syft/src/syft/client/domain_client.py @@ -39,6 +39,7 @@ from .client import login from .client import login_as_guest from .connection import NodeConnection +from .protocol import SyftProtocol if TYPE_CHECKING: # relative @@ -290,7 +291,11 @@ def connect_to_gateway( handle: NodeHandle | None = None, # noqa: F821 email: str | None = None, password: str | None = None, + protocol: str | SyftProtocol = SyftProtocol.HTTP, ) -> SyftSuccess | SyftError | None: + if isinstance(protocol, str): + protocol = SyftProtocol(protocol) + if via_client is not None: client = via_client elif handle is not None: @@ -304,7 +309,7 @@ def connect_to_gateway( if isinstance(client, SyftError): return client - res = self.exchange_route(client) + res = self.exchange_route(client, protocol=protocol) if isinstance(res, SyftSuccess): if self.metadata: return SyftSuccess( diff --git a/packages/syft/src/syft/client/enclave_client.py b/packages/syft/src/syft/client/enclave_client.py index 34fcc64605d..dc9c7c15de1 100644 --- a/packages/syft/src/syft/client/enclave_client.py +++ b/packages/syft/src/syft/client/enclave_client.py @@ -25,6 +25,7 @@ from .client import SyftClient from .client import login from .client import login_as_guest +from .protocol import SyftProtocol if TYPE_CHECKING: # relative @@ -72,7 +73,11 @@ def connect_to_gateway( handle: NodeHandle | None = None, # noqa: F821 email: str | None = None, password: str | None = None, + protocol: str | SyftProtocol = SyftProtocol.HTTP, ) -> SyftSuccess | SyftError | None: + if isinstance(protocol, str): + protocol = SyftProtocol(protocol) + if via_client is not None: client = via_client 
elif handle is not None: @@ -87,7 +92,7 @@ def connect_to_gateway( return client self.metadata: NodeMetadataJSON = self.metadata - res = self.exchange_route(client) + res = self.exchange_route(client, protocol=protocol) if isinstance(res, SyftSuccess): return SyftSuccess( diff --git a/packages/syft/src/syft/client/protocol.py b/packages/syft/src/syft/client/protocol.py new file mode 100644 index 00000000000..e969d59ca5d --- /dev/null +++ b/packages/syft/src/syft/client/protocol.py @@ -0,0 +1,12 @@ +# stdlib +from enum import Enum + + +class SyftProtocol(Enum): + """Enum class to represent the different Syft protocols.""" + + HTTP = "http" + VEILID = "veilid" + + def all(self) -> list: + return [p.value for p in SyftProtocol] diff --git a/packages/syft/src/syft/node/node.py b/packages/syft/src/syft/node/node.py index f5211f853b3..6b87e9d30e4 100644 --- a/packages/syft/src/syft/node/node.py +++ b/packages/syft/src/syft/node/node.py @@ -97,6 +97,7 @@ from ..service.user.user_roles import ServiceRole from ..service.user.user_service import UserService from ..service.user.user_stash import UserStash +from ..service.veilid import VEILID_ENABLED from ..service.worker.image_registry_service import SyftImageRegistryService from ..service.worker.utils import DEFAULT_WORKER_IMAGE_TAG from ..service.worker.utils import DEFAULT_WORKER_POOL_NAME @@ -393,6 +394,12 @@ def __init__( services += [OblvService] create_oblv_key_pair(worker=self) + if VEILID_ENABLED: + # relative + from ..service.veilid.veilid_service import VeilidService + + services += [VeilidService] + self.enable_warnings = enable_warnings self.in_memory_workers = in_memory_workers @@ -990,6 +997,12 @@ def _construct_services(self) -> None: store_services += [OblvService] + if VEILID_ENABLED: + # relative + from ..service.veilid.veilid_service import VeilidService + + store_services += [VeilidService] + if service_klass in store_services: kwargs["store"] = self.document_store # type: ignore[assignment] 
self.service_path_map[service_klass.__name__.lower()] = service_klass( diff --git a/packages/syft/src/syft/protocol/protocol_version.json b/packages/syft/src/syft/protocol/protocol_version.json index 78cce59bb03..ab4aecf4586 100644 --- a/packages/syft/src/syft/protocol/protocol_version.json +++ b/packages/syft/src/syft/protocol/protocol_version.json @@ -1157,18 +1157,6 @@ "action": "add" } }, - "EnclaveMetadata": { - "1": { - "version": 1, - "hash": "39f85e475015e6f860ddcc5fea819423eba2db8f4b7d8e004c05a44d6f8444c6", - "action": "remove" - }, - "2": { - "version": 2, - "hash": "b5b03b47cbcdf4c679228932eabe06512da18759ab7358a3c80772502be15f29", - "action": "add" - } - }, "DataSubject": { "1": { "version": 1, @@ -1549,7 +1537,7 @@ }, "2": { "version": 2, - "hash": "92c4d4a2ff206c4729d44beb89af349c093a26a3c36527efcf94227a6a150b8d", + "hash": "14cf8b9bb7c95c20caec8606ae5dddb882832f00fba2326352e7a0f2444dbc9f", "action": "add" } }, @@ -1660,6 +1648,32 @@ "hash": "0af1abb9ac899c0bc133971f75d17be8260b80a2df9fe191965db431bb6fd910", "action": "add" } + }, + "VeilidConnection": { + "1": { + "version": 1, + "hash": "29f803cec69b9ca6118e7c004867e82de6297f138b267ebd3df9ed35d5c944e4", + "action": "add" + } + }, + "VeilidNodeRoute": { + "1": { + "version": 1, + "hash": "0ecd536def6b99475f4478acefb0226886336934206529647ee3e4667e211514", + "action": "add" + } + }, + "EnclaveMetadata": { + "1": { + "version": 1, + "hash": "39f85e475015e6f860ddcc5fea819423eba2db8f4b7d8e004c05a44d6f8444c6", + "action": "remove" + }, + "2": { + "version": 2, + "hash": "5103272305abd2bcf23c616bd9014be986a92c40dc37b6238680114036451852", + "action": "add" + } } } } diff --git a/packages/syft/src/syft/service/network/network_service.py b/packages/syft/src/syft/service/network/network_service.py index 44e0c6f784f..9cb0daa97ff 100644 --- a/packages/syft/src/syft/service/network/network_service.py +++ b/packages/syft/src/syft/service/network/network_service.py @@ -13,6 +13,7 @@ from ...client.client import 
HTTPConnection from ...client.client import PythonConnection from ...client.client import SyftClient +from ...client.client import VeilidConnection from ...node.credentials import SyftVerifyKey from ...node.worker_settings import WorkerSettings from ...serde.serializable import serializable @@ -27,6 +28,7 @@ from ...types.transforms import keep from ...types.transforms import transform from ...types.transforms import transform_method +from ...types.uid import UID from ...util.telemetry import instrument from ..context import AuthedServiceContext from ..data_subject.data_subject import NamePartitionKey @@ -37,12 +39,14 @@ from ..service import SERVICE_TO_TYPES from ..service import TYPE_TO_SERVICE from ..service import service_method +from ..user.user_roles import DATA_OWNER_ROLE_LEVEL from ..user.user_roles import GUEST_ROLE_LEVEL from ..warnings import CRUDWarning from .node_peer import NodePeer from .routes import HTTPNodeRoute from .routes import NodeRoute from .routes import PythonNodeRoute +from .routes import VeilidNodeRoute VerifyKeyPartitionKey = PartitionKey(key="verify_key", type_=SyftVerifyKey) NodeTypePartitionKey = PartitionKey(key="node_type", type_=NodeType) @@ -88,7 +92,7 @@ def update_peer( ) if existing.is_ok() and existing.ok(): existing = existing.ok() - existing.update_routes(new_routes=peer.node_routes) + existing.update_routes(peer.node_routes) result = self.update(credentials, existing) return result else: @@ -367,6 +371,103 @@ def get_peers_by_type( # Return peers or an empty list when result is None return result.ok() or [] + @service_method( + path="network.delete_peer_by_id", + name="delete_peer_by_id", + roles=DATA_OWNER_ROLE_LEVEL, + ) + def delete_peer_by_id( + self, context: AuthedServiceContext, uid: UID + ) -> SyftSuccess | SyftError: + """Delete Node Peer""" + result = self.stash.delete_by_uid(context.credentials, uid) + if result.is_err(): + return SyftError(message=str(result.err())) + return SyftSuccess(message="Node Peer 
Deleted") + + @service_method( + path="network.exchange_veilid_route", + name="exchange_veilid_route", + roles=DATA_OWNER_ROLE_LEVEL, + ) + def exchange_veilid_route( + self, + context: AuthedServiceContext, + remote_node_route: NodeRoute, + ) -> SyftSuccess | SyftError: + """Exchange Route With Another Node""" + context.node = cast(AbstractNode, context.node) + # Step 1: Get our own Veilid Node Peer to send to the remote node + self_node_peer: NodePeer = context.node.settings.to(NodePeer) + + veilid_service = context.node.get_service("veilidservice") + veilid_route = veilid_service.get_veilid_route(context=context) + + if isinstance(veilid_route, SyftError): + return veilid_route + + self_node_peer.node_routes = [veilid_route] + + # Step 2: Create a Remote Client + remote_client: SyftClient = remote_node_route.client_with_context( + context=context + ) + + # Step 3: Send the Node Peer to the remote node + remote_node_peer: NodePeer | SyftError = ( + remote_client.api.services.network.add_veilid_peer( + peer=self_node_peer, + ) + ) + + if not isinstance(remote_node_peer, NodePeer): + return remote_node_peer + + # Step 4: Add the remote Node Peer to our stash + result = self.stash.update_peer(context.node.verify_key, remote_node_peer) + if result.is_err(): + return SyftError(message=str(result.err())) + + return SyftSuccess(message="Routes Exchanged") + + @service_method( + path="network.add_veilid_peer", name="add_veilid_peer", roles=GUEST_ROLE_LEVEL + ) + def add_veilid_peer( + self, + context: AuthedServiceContext, + peer: NodePeer, + ) -> NodePeer | SyftError: + """Add a Veilid Node Peer""" + context.node = cast(AbstractNode, context.node) + # Step 1: Using the verify_key of the peer to verify the signature + # It is also our single source of truth for the peer + if peer.verify_key != context.credentials: + return SyftError( + message=( + f"The {type(peer)}.verify_key: " + f"{peer.verify_key} does not match the signature of the message" + ) + ) + + # Step 2: 
Save the remote peer to our stash + result = self.stash.update_peer(context.node.verify_key, peer) + if result.is_err(): + return SyftError(message=str(result.err())) + + # Step 3: Get our own Veilid Node Peer to send to the remote node + self_node_peer: NodePeer = context.node.settings.to(NodePeer) + + veilid_service = context.node.get_service("veilidservice") + veilid_route = veilid_service.get_veilid_route(context=context) + + if isinstance(veilid_route, SyftError): + return veilid_route + + self_node_peer.node_routes = [veilid_route] + + return self_node_peer + TYPE_TO_SERVICE[NodePeer] = NetworkService SERVICE_TO_TYPES[NetworkService].update({NodePeer}) @@ -420,6 +521,20 @@ def node_route_to_http_connection( return HTTPConnection(url=url, proxy_target_uid=obj.proxy_target_uid) +@transform_method(VeilidNodeRoute, VeilidConnection) +def node_route_to_veilid_connection( + obj: Any, context: TransformContext | None = None +) -> list[Callable]: + return VeilidConnection(dht_key=obj.dht_key, proxy_target_uid=obj.proxy_target_uid) + + +@transform_method(VeilidConnection, VeilidNodeRoute) +def veilid_connection_to_node_route( + obj: Any, context: TransformContext | None = None +) -> list[Callable]: + return VeilidNodeRoute(dht_key=obj.dht_key, proxy_target_uid=obj.proxy_target_uid) + + @transform(NodeMetadataV3, NodePeer) def metadata_to_peer() -> list[Callable]: return [ diff --git a/packages/syft/src/syft/service/network/node_peer.py b/packages/syft/src/syft/service/network/node_peer.py index 6ab1dc70372..bd7dedce97d 100644 --- a/packages/syft/src/syft/service/network/node_peer.py +++ b/packages/syft/src/syft/service/network/node_peer.py @@ -1,8 +1,5 @@ # stdlib -# third party -from typing_extensions import Self - # relative from ...abstract_node import NodeType from ...client.client import SyftClient @@ -18,6 +15,8 @@ from .routes import HTTPNodeRoute from .routes import NodeRoute from .routes import NodeRouteType +from .routes import PythonNodeRoute +from .routes 
import VeilidNodeRoute from .routes import connection_to_route from .routes import route_to_connection @@ -84,7 +83,7 @@ def existed_route(self, route: NodeRoute) -> tuple[bool, int | None]: ): return (True, i) return (False, None) - else: # PythonNodeRoute + elif isinstance(route, PythonNodeRoute): # PythonNodeRoute for i, r in enumerate(self.node_routes): # something went wrong here if ( (route.worker_settings.id == r.worker_settings.id) @@ -101,9 +100,20 @@ def existed_route(self, route: NodeRoute) -> tuple[bool, int | None]: ): return (True, i) return (False, None) + elif isinstance(route, VeilidNodeRoute): + for i, r in enumerate(self.node_routes): + if ( + route.dht_key == r.dht_key + and route.proxy_target_uid == r.proxy_target_uid + ): + return (True, i) + + return (False, None) + else: + raise ValueError(f"Unsupported route type: {type(route)}") - @classmethod - def from_client(cls, client: SyftClient) -> Self: + @staticmethod + def from_client(client: SyftClient) -> "NodePeer": if not client.metadata: raise Exception("Client has to have metadata first") diff --git a/packages/syft/src/syft/service/network/routes.py b/packages/syft/src/syft/service/network/routes.py index bbe8d27f2f8..c9d27f78e6e 100644 --- a/packages/syft/src/syft/service/network/routes.py +++ b/packages/syft/src/syft/service/network/routes.py @@ -16,8 +16,10 @@ from ...client.client import NodeConnection from ...client.client import PythonConnection from ...client.client import SyftClient +from ...client.client import VeilidConnection from ...node.worker_settings import WorkerSettings from ...serde.serializable import serializable +from ...types.syft_object import SYFT_OBJECT_VERSION_1 from ...types.syft_object import SYFT_OBJECT_VERSION_2 from ...types.syft_object import SyftObject from ...types.transforms import TransformContext @@ -90,6 +92,21 @@ def __eq__(self, other: Any) -> bool: return self == other +@serializable() +class VeilidNodeRoute(SyftObject, NodeRoute): + 
__canonical_name__ = "VeilidNodeRoute" + __version__ = SYFT_OBJECT_VERSION_1 + + dht_key: str + proxy_target_uid: UID | None = None + priority: int = 1 + + def __eq__(self, other: Any) -> bool: + if isinstance(other, VeilidNodeRoute): + return hash(self) == hash(other) + return self == other + + @serializable() class PythonNodeRoute(SyftObject, NodeRoute): __canonical_name__ = "PythonNodeRoute" @@ -127,7 +144,7 @@ def __eq__(self, other: Any) -> bool: return self == other -NodeRouteType = HTTPNodeRoute | PythonNodeRoute +NodeRouteType = HTTPNodeRoute | PythonNodeRoute | VeilidNodeRoute def route_to_connection( @@ -135,12 +152,20 @@ def route_to_connection( ) -> NodeConnection: if isinstance(route, HTTPNodeRoute): return route.to(HTTPConnection, context=context) - else: + elif isinstance(route, PythonNodeRoute): return route.to(PythonConnection, context=context) + elif isinstance(route, VeilidNodeRoute): + return route.to(VeilidConnection, context=context) + else: + raise ValueError(f"Route {route} is not supported.") def connection_to_route(connection: NodeConnection) -> NodeRoute: if isinstance(connection, HTTPConnection): return connection.to(HTTPNodeRoute) + elif isinstance(connection, PythonConnection): # type: ignore[unreachable] + return connection.to(PythonNodeRoute) + elif isinstance(connection, VeilidConnection): + return connection.to(VeilidNodeRoute) else: - return connection.to(PythonNodeRoute) # type: ignore[unreachable] + raise ValueError(f"Connection {connection} is not supported.") diff --git a/packages/syft/src/syft/service/service.py b/packages/syft/src/syft/service/service.py index fee806c440c..8a98eca633b 100644 --- a/packages/syft/src/syft/service/service.py +++ b/packages/syft/src/syft/service/service.py @@ -39,6 +39,7 @@ from .response import SyftError from .user.user_roles import DATA_OWNER_ROLE_LEVEL from .user.user_roles import ServiceRole +from .veilid import VEILID_ENABLED from .warnings import APIEndpointWarning if TYPE_CHECKING: @@ 
-227,14 +228,19 @@ def register_lib_obj(lib_obj: CMPBase) -> None:
     LibConfigRegistry.register(lib_config)
 
 
-# hacky, prevent circular imports
-for lib_obj in action_execute_registry_libs.flatten():
-    # # for functions
-    # func_name = func.__name__
-    # # for classes
-    # func_name = path.split(".")[-1]
-    if isinstance(lib_obj, CMPFunction) or isinstance(lib_obj, CMPClass):
-        register_lib_obj(lib_obj)
+# NOTE: Currently we disable adding library endpoints like numpy and torch when veilid is enabled.
+# This is because the /api endpoint, which returns SyftAPI along with the lib endpoints, exceeds
+# 2 MB. But veilid has a limit of 32 KB for sending and receiving messages.
+# This will be fixed when chunking is implemented in veilid core.
+if not VEILID_ENABLED:
+    # hacky, prevent circular imports
+    for lib_obj in action_execute_registry_libs.flatten():
+        # # for functions
+        # func_name = func.__name__
+        # # for classes
+        # func_name = path.split(".")[-1]
+        if isinstance(lib_obj, CMPFunction) or isinstance(lib_obj, CMPClass):
+            register_lib_obj(lib_obj)
 
 
 def deconstruct_param(param: inspect.Parameter) -> dict[str, Any]:
diff --git a/packages/syft/src/syft/service/veilid/__init__.py b/packages/syft/src/syft/service/veilid/__init__.py
new file mode 100644
index 00000000000..e07b6b857c9
--- /dev/null
+++ b/packages/syft/src/syft/service/veilid/__init__.py
@@ -0,0 +1,7 @@
+# stdlib
+import os
+
+# relative
+from ...util.util import str_to_bool
+
+VEILID_ENABLED: bool = str_to_bool(os.environ.get("VEILID_ENABLED", "False"))
diff --git a/packages/syft/src/syft/service/veilid/veilid_endpoints.py b/packages/syft/src/syft/service/veilid/veilid_endpoints.py
new file mode 100644
index 00000000000..08b67585f74
--- /dev/null
+++ b/packages/syft/src/syft/service/veilid/veilid_endpoints.py
@@ -0,0 +1,8 @@
+VEILID_SERVICE_URL = "http://veilid:80"
+# Service name of our traefik service
+# TODO: Remove this once we remove the reverse proxy in Veilid Connection
+VEILID_SYFT_PROXY_URL = 
"http://proxy:80" +HEALTHCHECK_ENDPOINT = "/healthcheck" +GEN_DHT_KEY_ENDPOINT = "/generate_dht_key" +RET_DHT_KEY_ENDPOINT = "/retrieve_dht_key" +VEILID_PROXY_PATH = "/proxy" diff --git a/packages/syft/src/syft/service/veilid/veilid_service.py b/packages/syft/src/syft/service/veilid/veilid_service.py new file mode 100644 index 00000000000..612f5415244 --- /dev/null +++ b/packages/syft/src/syft/service/veilid/veilid_service.py @@ -0,0 +1,92 @@ +# stdlib +from collections.abc import Callable + +# third party +import requests + +# relative +from ...serde.serializable import serializable +from ...store.document_store import DocumentStore +from ...util.telemetry import instrument +from ..context import AuthedServiceContext +from ..network.routes import VeilidNodeRoute +from ..response import SyftError +from ..response import SyftSuccess +from ..service import AbstractService +from ..service import service_method +from ..user.user_roles import DATA_OWNER_ROLE_LEVEL +from .veilid_endpoints import GEN_DHT_KEY_ENDPOINT +from .veilid_endpoints import HEALTHCHECK_ENDPOINT +from .veilid_endpoints import RET_DHT_KEY_ENDPOINT +from .veilid_endpoints import VEILID_SERVICE_URL + + +@instrument +@serializable() +class VeilidService(AbstractService): + store: DocumentStore + + def __init__(self, store: DocumentStore) -> None: + self.store = store + + def perform_request( + self, method: Callable, endpoint: str, raw: bool = False + ) -> SyftSuccess | SyftError | str: + try: + response = method(f"{VEILID_SERVICE_URL}{endpoint}") + response.raise_for_status() + message = response.json().get("message") + return message if raw else SyftSuccess(message=message) + except requests.HTTPError: + return SyftError(message=f"{response.json()['detail']}") + except requests.RequestException as e: + return SyftError(message=f"Failed to perform request. 
{e}") + + def is_veilid_service_healthy(self) -> bool: + res = self.perform_request( + method=requests.get, endpoint=HEALTHCHECK_ENDPOINT, raw=True + ) + return res == "OK" + + @service_method( + path="veilid.generate_dht_key", + name="generate_dht_key", + roles=DATA_OWNER_ROLE_LEVEL, + ) + def generate_dht_key(self, context: AuthedServiceContext) -> str | SyftError: + if not self.is_veilid_service_healthy(): + return SyftError( + message="Veilid service is not healthy. Please try again later." + ) + return self.perform_request( + method=requests.post, + endpoint=GEN_DHT_KEY_ENDPOINT, + ) + + @service_method( + path="veilid.retrieve_dht_key", + name="retrieve_dht_key", + roles=DATA_OWNER_ROLE_LEVEL, + ) + def retrieve_dht_key(self, context: AuthedServiceContext) -> str | SyftError: + if not self.is_veilid_service_healthy(): + return SyftError( + message="Veilid service is not healthy. Please try again later." + ) + return self.perform_request( + method=requests.get, + endpoint=RET_DHT_KEY_ENDPOINT, + raw=True, + ) + + @service_method( + path="veilid.get_veilid_route", + name="get_veilid_route", + ) + def get_veilid_route( + self, context: AuthedServiceContext + ) -> VeilidNodeRoute | SyftError: + dht_key = self.retrieve_dht_key(context) + if isinstance(dht_key, SyftError): + return dht_key + return VeilidNodeRoute(dht_key=dht_key) diff --git a/scripts/k8s/delete_stack.sh b/scripts/k8s/delete_stack.sh new file mode 100755 index 00000000000..86d0a1ce176 --- /dev/null +++ b/scripts/k8s/delete_stack.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# Deleting gateway node +bash -c "CLUSTER_NAME=testgateway1 tox -e dev.k8s.destroy || true" + +# Deleting domain node +bash -c "CLUSTER_NAME=testdomain1 tox -e dev.k8s.destroy || true" \ No newline at end of file diff --git a/scripts/k8s/launch_domain.sh b/scripts/k8s/launch_domain.sh new file mode 100755 index 00000000000..d39f45744d3 --- /dev/null +++ b/scripts/k8s/launch_domain.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# Domain Node +bash -c 
'\ + export CLUSTER_NAME=testdomain1 CLUSTER_HTTP_PORT=9082 && \ + tox -e dev.k8s.start && \ + tox -e dev.k8s.hotreload' \ No newline at end of file diff --git a/scripts/k8s/launch_gateway.sh b/scripts/k8s/launch_gateway.sh new file mode 100755 index 00000000000..792a0885ae4 --- /dev/null +++ b/scripts/k8s/launch_gateway.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# Gateway Node +bash -c '\ + export CLUSTER_NAME=testgateway1 CLUSTER_HTTP_PORT=9081 DEVSPACE_PROFILE=gateway && \ + tox -e dev.k8s.start && \ + tox -e dev.k8s.hotreload' \ No newline at end of file diff --git a/tests/integration/veilid/gateway_veilid_test.py b/tests/integration/veilid/gateway_veilid_test.py new file mode 100644 index 00000000000..984389dca22 --- /dev/null +++ b/tests/integration/veilid/gateway_veilid_test.py @@ -0,0 +1,93 @@ +# third party +import pytest + +# syft absolute +import syft as sy +from syft.abstract_node import NodeType +from syft.client.domain_client import DomainClient +from syft.client.gateway_client import GatewayClient +from syft.client.protocol import SyftProtocol +from syft.service.network.node_peer import NodePeer +from syft.service.network.routes import VeilidNodeRoute +from syft.service.response import SyftSuccess +from syft.service.user.user_roles import ServiceRole + + +def remove_existing_peers(client): + for peer in client.api.services.network.get_all_peers(): + res = client.api.services.network.delete_peer_by_id(peer.id) + assert isinstance(res, SyftSuccess) + + +@pytest.mark.veilid +def test_domain_connect_to_gateway_veilid(domain_1_port, gateway_port): + # Revert to the guest login, when we automatically generate the dht key + # gateway_client: GatewayClient = sy.login_as_guest(port=gateway_port) + gateway_client: GatewayClient = sy.login( + port=gateway_port, email="info@openmined.org", password="changethis" + ) + domain_client: DomainClient = sy.login( + port=domain_1_port, email="info@openmined.org", password="changethis" + ) + + # Remove existing peers due to the 
previous gateway test + remove_existing_peers(domain_client) + remove_existing_peers(gateway_client) + + # Generate DHT Record + gateway_dht_res = gateway_client.api.services.veilid.generate_dht_key() + assert isinstance(gateway_dht_res, SyftSuccess), gateway_dht_res + domain_dht_res = domain_client.api.services.veilid.generate_dht_key() + assert isinstance(domain_dht_res, SyftSuccess), domain_dht_res + + # Retrieve DHT Record + domain_veilid_route = domain_client.api.services.veilid.get_veilid_route() + assert isinstance(domain_veilid_route, VeilidNodeRoute), domain_veilid_route + gateway_veilid_route = gateway_client.api.services.veilid.get_veilid_route() + assert isinstance(gateway_veilid_route, VeilidNodeRoute), gateway_veilid_route + + # Connect Domain to Gateway via Veilid + result = domain_client.connect_to_gateway( + gateway_client, protocol=SyftProtocol.VEILID + ) + assert isinstance(result, SyftSuccess) + + proxy_domain_client = gateway_client.peers[0] + domain_peer = domain_client.peers[0] + gateway_peer = gateway_client.api.services.network.get_all_peers()[0] + + # Domain Asserts + assert len(domain_client.peers) == 1 + assert isinstance(proxy_domain_client, DomainClient) + assert domain_peer.node_type == NodeType.GATEWAY + assert isinstance(domain_peer, NodePeer) + assert isinstance(domain_peer.node_routes[0], VeilidNodeRoute) + assert domain_peer.node_routes[0].dht_key == gateway_veilid_route.dht_key + assert domain_client.name == proxy_domain_client.name + + # Gateway Asserts + assert len(gateway_client.peers) == 1 + assert gateway_peer.node_type == NodeType.DOMAIN + assert isinstance(gateway_peer.node_routes[0], VeilidNodeRoute) + assert gateway_peer.node_routes[0].dht_key == domain_veilid_route.dht_key + assert gateway_client.name == domain_peer.name + assert len(gateway_client.domains) == 1 + assert len(gateway_client.enclaves) == 0 + + # Proxy Domain Asserts + assert proxy_domain_client.metadata == domain_client.metadata + assert 
proxy_domain_client.user_role == ServiceRole.NONE + + domain_client = domain_client.login( + email="info@openmined.org", password="changethis" + ) + proxy_domain_client = proxy_domain_client.login( + email="info@openmined.org", password="changethis" + ) + + assert proxy_domain_client.logged_in_user == "info@openmined.org" + assert proxy_domain_client.user_role == ServiceRole.ADMIN + assert proxy_domain_client.credentials == domain_client.credentials + assert ( + proxy_domain_client.api.endpoints.keys() == domain_client.api.endpoints.keys() + ) diff --git a/tox.ini b/tox.ini index a11b484d81a..ff6bb1d2bdf 100644 --- a/tox.ini +++ b/tox.ini @@ -754,6 +754,10 @@ commands = bash -c "source ./scripts/get_k8s_secret_ci.sh; \ pytest tests/integration/network -k 'not test_domain_gateway_user_code' -p no:randomly -vvvv" + # Veilid Integration tests + bash -c "source ./scripts/get_k8s_secret_ci.sh; \ + pytest tests/integration/veilid -p no:randomly -vvvv" + # Shutting down the gateway cluster to free up space, as the # below code does not require gateway cluster bash -c "CLUSTER_NAME=testgateway1 tox -e dev.k8s.destroy || true" @@ -859,7 +863,7 @@ commands = bash -c 'NODE_NAME=syft NODE_PORT=${NODE_PORT} && \ k3d cluster create syft -p "$NODE_PORT:80@loadbalancer" --registry-use k3d-registry.localhost || true \ k3d cluster start syft' - tox -e dev.k8s.patch.coredns + CLUSTER_NAME=syft tox -e dev.k8s.patch.coredns sleep 10 bash -c "kubectl --context k3d-syft create namespace syft || true" @@ -933,15 +937,17 @@ commands = [testenv:dev.k8s.patch.coredns] description = Patch CoreDNS to resolve k3d-registry.localhost changedir = {toxinidir} -passenv=HOME,USER +passenv=HOME,USER,CLUSTER_NAME +setenv = + CLUSTER_NAME = {env:CLUSTER_NAME:syft-dev} allowlist_externals = bash commands = ; patch coredns so k3d-registry.localhost works in k3d - bash -c 'kubectl apply -f ./scripts/k8s-coredns-custom.yml' + bash -c 'kubectl apply -f ./scripts/k8s-coredns-custom.yml --context 
k3d-${CLUSTER_NAME}' ; restarts coredns - bash -c 'kubectl delete pod -n kube-system -l k8s-app=kube-dns' + bash -c 'kubectl delete pod -n kube-system -l k8s-app=kube-dns --context k3d-${CLUSTER_NAME}' [testenv:dev.k8s.start] description = Start local Kubernetes registry & cluster with k3d