diff --git a/.bumpversion.cfg b/.bumpversion.cfg index cd48a82318d..cc664a69ad6 100644 --- a/.bumpversion.cfg +++ b/.bumpversion.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 0.8.7-beta.13 +current_version = 0.8.8-beta.2 tag = False tag_name = {new_version} commit = True diff --git a/.bumpversion_stable.cfg b/.bumpversion_stable.cfg index 52b011ac7b1..4ed947e8dd6 100644 --- a/.bumpversion_stable.cfg +++ b/.bumpversion_stable.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 0.8.6 +current_version = 0.8.7 tag = False tag_name = {new_version} commit = True diff --git a/.github/workflows/cd-feature-branch.yml b/.github/workflows/cd-feature-branch.yml new file mode 100644 index 00000000000..cab49a481f0 --- /dev/null +++ b/.github/workflows/cd-feature-branch.yml @@ -0,0 +1,381 @@ +#TODO: Due to lack of time, this could not be de-duplicated +# from cd-syft.yml, which have a similar structure +name: CD - Feature Branch + +on: + workflow_dispatch: + inputs: + release_version: + description: "Syft Version to Release" + required: true + type: string + + release_branch: + description: "Branch to Release from" + required: true + type: string + + release_platform: + description: "Release Platform" + required: true + default: "TEST_PYPI" + type: choice + options: + - TEST_PYPI + # - REAL_PYPI + # - REAL_AND_TEST_PYPI + +# Prevents concurrent runs of the same workflow +# while the previous run is still in progress +concurrency: + group: "CD - Feature Branch" + cancel-in-progress: false + +jobs: + build-and-push-docker-images: + strategy: + matrix: + runner: [sh-arc-linux-x64, sh-arc-linux-arm64] + runs-on: ${{ matrix.runner }} + + outputs: + server_version: ${{ steps.release_metadata.outputs.server_version }} + + steps: + # actions/setup-python doesn't yet support ARM + - name: Setup Python on x64 + if: ${{ !endsWith(matrix.runner, '-arm64') }} + uses: actions/setup-python@v5 + with: + python-version: "3.12" + + # Currently psutil package requires gcc to be installed on arm + # for building psutil from source + # as linux/aarch64 wheels not avaialble for psutil + # We could remove once we have aarch64 wheels + # https://github.com/giampaolo/psutil/issues/1972 + - name: Install Metadata packages for arm64 + if: ${{ endsWith(matrix.runner, '-arm64') }} + run: | + sudo apt update -y + sudo apt install software-properties-common -y + sudo apt install gcc curl -y + sudo apt-get install python3-dev -y + + - name: Setup Python on arm64 + if: ${{ endsWith(matrix.runner, '-arm64') }} + uses: deadsnakes/action@v3.1.0 + with: + python-version: "3.12" + + - name: Install Git + run: | + sudo apt-get update + sudo apt-get install git -y + + - uses: actions/checkout@v4 + with: + ref: ${{ github.event.inputs.release_branch }} + + - name: Check python version + run: | + python --version + python3 --version + which python + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install uv==0.2.17 tox tox-uv==1.9.0 bump2version==1.0.1 + uv --version + + - name: Generate Release Metadata + id: release_metadata + run: | + if [[ ${{matrix.runner}} == *"x64"* ]]; then + echo "release_platform=linux/amd64" >> $GITHUB_OUTPUT + echo "short_release_platform=amd64" >> $GITHUB_OUTPUT + else + echo "release_platform=linux/arm64" >> $GITHUB_OUTPUT + echo "short_release_platform=arm64" >> $GITHUB_OUTPUT + fi + echo "server_version=${{ github.event.inputs.release_version }}" >> $GITHUB_OUTPUT + + - name: Bump to Final Release version + run: | + python scripts/bump_version.py --bump-to-stable ${{ 
steps.release_metadata.outputs.server_version}} + + - name: Update Commit Hash in Syft + run: | + python packages/syft/src/syft/util/update_commit.py packages/syft/src/syft/util/commit.py + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Login to Docker + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKER_LOGIN }} + password: ${{ secrets.DOCKER_PASSWORD }} + + - name: Build and push `syft-backend` image to DockerHub + id: syft-backend-build + uses: docker/build-push-action@v6 + with: + context: ./packages + file: ./packages/grid/backend/backend.dockerfile + platforms: ${{ steps.release_metadata.outputs.release_platform }} + target: backend + outputs: type=image,name=openmined/syft-backend,push-by-digest=true,name-canonical=true,push=true + cache-from: type=registry,ref=openmined/syft-backend:cache-${{ steps.release_metadata.outputs.short_release_platform }} + cache-to: type=registry,ref=openmined/syft-backend:cache-${{ steps.release_metadata.outputs.short_release_platform }},mode=max + + - name: Export digest for syft-backend + run: | + mkdir -p /tmp/digests/syft-backend + digest="${{ steps.syft-backend-build.outputs.digest }}" + touch "/tmp/digests/syft-backend/${digest#sha256:}" + + - name: Build and push `syft-frontend` image to DockerHub + id: syft-frontend-build + uses: docker/build-push-action@v6 + with: + context: ./packages/grid/frontend + file: ./packages/grid/frontend/frontend.dockerfile + platforms: ${{ steps.release_metadata.outputs.release_platform }} + outputs: type=image,name=openmined/syft-frontend,push-by-digest=true,name-canonical=true,push=true + target: syft-ui-development + cache-from: type=registry,ref=openmined/syft-frontend:cache-${{ steps.release_metadata.outputs.short_release_platform }} + cache-to: type=registry,ref=openmined/syft-frontend:cache-${{ steps.release_metadata.outputs.short_release_platform}},mode=max + + - name: Export digest for syft-frontend + run: | + mkdir -p /tmp/digests/syft-frontend + digest="${{ steps.syft-frontend-build.outputs.digest }}" + touch "/tmp/digests/syft-frontend/${digest#sha256:}" + + - name: Build and push `syft-seaweedfs` image to DockerHub + id: syft-seaweedfs-build + uses: docker/build-push-action@v6 + with: + context: ./packages/grid/seaweedfs + file: ./packages/grid/seaweedfs/seaweedfs.dockerfile + platforms: ${{ steps.release_metadata.outputs.release_platform }} + outputs: type=image,name=openmined/syft-seaweedfs,push-by-digest=true,name-canonical=true,push=true + cache-from: type=registry,ref=openmined/syft-seaweedfs:cache-${{ steps.release_metadata.outputs.short_release_platform }} + cache-to: type=registry,ref=openmined/syft-seaweedfs:cache-${{ steps.release_metadata.outputs.short_release_platform}},mode=max + + - name: Export digest for syft-seaweedfs + run: | + mkdir -p /tmp/digests/syft-seaweedfs + digest="${{ steps.syft-seaweedfs-build.outputs.digest }}" + touch "/tmp/digests/syft-seaweedfs/${digest#sha256:}" + + # Some of the dependencies of syft-enclave-attestation are not available for arm64 + # Hence, we are building syft-enclave-attestation only for x64 (see the `if` conditional) + - name: Build and push `syft-enclave-attestation` image to DockerHub + if: ${{ endsWith(matrix.runner, '-x64') }} + id: syft-enclave-attestation-build + uses: docker/build-push-action@v6 + with: + context: ./packages/grid/enclave/attestation + file: ./packages/grid/enclave/attestation/attestation.dockerfile + platforms: ${{ steps.release_metadata.outputs.release_platform }} + outputs: 
type=image,name=openmined/syft-enclave-attestation,push-by-digest=true,name-canonical=true,push=true + cache-from: type=registry,ref=openmined/syft-enclave-attestation:cache-${{ steps.release_metadata.outputs.short_release_platform }} + cache-to: type=registry,ref=openmined/syft-enclave-attestation:cache-${{ steps.release_metadata.outputs.short_release_platform}},mode=max + + - name: Export digest for syft-enclave-attestation + if: ${{ endsWith(matrix.runner, '-x64') }} + run: | + mkdir -p /tmp/digests/syft-enclave-attestation + digest="${{ steps.syft-enclave-attestation-build.outputs.digest }}" + touch "/tmp/digests/syft-enclave-attestation/${digest#sha256:}" + + - name: Build and push `syft` image to registry + id: syft-build + uses: docker/build-push-action@v6 + with: + context: ./packages/ + file: ./packages/grid/syft-client/syft.Dockerfile + outputs: type=image,name=openmined/syft-client,push-by-digest=true,name-canonical=true,push=true + platforms: ${{ steps.release_metadata.outputs.release_platform }} + cache-from: type=registry,ref=openmined/syft-client:cache-${{ steps.release_metadata.outputs.short_release_platform }} + cache-to: type=registry,ref=openmined/syft-client:cache-${{ steps.release_metadata.outputs.short_release_platform }},mode=max + + - name: Export digest for `syft` image + run: | + mkdir -p /tmp/digests/syft + digest="${{ steps.syft-build.outputs.digest }}" + touch "/tmp/digests/syft/${digest#sha256:}" + + - name: Upload digests + uses: actions/upload-artifact@v4 + with: + name: digests-${{ steps.release_metadata.outputs.server_version }}-${{ steps.release_metadata.outputs.short_release_platform }} + path: /tmp/digests/* + if-no-files-found: error + retention-days: 1 + + #Used to merge x64 and arm64 into one docker image + merge-docker-images: + needs: [build-and-push-docker-images] + if: always() && (needs.build-and-push-docker-images.result == 'success') + + runs-on: sh-arc-linux-x64 + + outputs: + server_version: ${{ needs.build-and-push-docker-images.outputs.server_version }} + + steps: + - name: Download digests + uses: actions/download-artifact@v4 + with: + path: /tmp/digests + pattern: digests-${{ needs.build-and-push-docker-images.outputs.server_version }}-* + merge-multiple: true + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Login to Docker + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKER_LOGIN }} + password: ${{ secrets.DOCKER_PASSWORD }} + + - name: Create manifest list and push for syft-backend + working-directory: /tmp/digests/syft-backend + run: | + docker buildx imagetools create \ + -t openmined/syft-backend:${{ needs.build-and-push-docker-images.outputs.server_version }} \ + $(printf 'openmined/syft-backend@sha256:%s ' *) + + - name: Create manifest list and push for syft-frontend + working-directory: /tmp/digests/syft-frontend + run: | + docker buildx imagetools create \ + -t openmined/syft-frontend:${{ needs.build-and-push-docker-images.outputs.server_version }} \ + $(printf 'openmined/syft-frontend@sha256:%s ' *) + + - name: Create manifest list and push for syft-seaweedfs + working-directory: /tmp/digests/syft-seaweedfs + run: | + docker buildx imagetools create \ + -t openmined/syft-seaweedfs:${{ needs.build-and-push-docker-images.outputs.server_version }} \ + $(printf 'openmined/syft-seaweedfs@sha256:%s ' *) + + - name: Create manifest list and push for syft-enclave-attestation + working-directory: /tmp/digests/syft-enclave-attestation + run: | + docker buildx imagetools create \ + -t 
openmined/syft-enclave-attestation:${{ needs.build-and-push-docker-images.outputs.server_version }} \ + $(printf 'openmined/syft-enclave-attestation@sha256:%s ' *) + + - name: Create manifest list and push for syft client + working-directory: /tmp/digests/syft + run: | + docker buildx imagetools create \ + -t openmined/syft-client:${{ needs.build-and-push-docker-images.outputs.server_version }} \ + $(printf 'openmined/syft-client@sha256:%s ' *) + + deploy-syft: + needs: [merge-docker-images] + if: always() && needs.merge-docker-images.result == 'success' + + runs-on: ubuntu-latest + + steps: + - name: Permission to home directory + run: | + sudo chown -R $USER:$USER $HOME + + - uses: actions/checkout@v4 + with: + token: ${{ secrets.SYFT_BOT_COMMIT_TOKEN }} + ref: ${{ github.event.inputs.release_branch }} + + # free 10GB of space + - name: Remove unnecessary files + run: | + sudo rm -rf /usr/share/dotnet + sudo rm -rf "$AGENT_TOOLSDIRECTORY" + docker image prune --all --force + docker builder prune --all --force + docker system prune --all --force + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.12" + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install uv==0.2.17 tox tox-uv==1.9.0 setuptools wheel twine bump2version PyYAML + uv --version + + - name: Bump to Final Release version + run: | + python scripts/bump_version.py --bump-to-stable ${{ needs.merge-docker-images.outputs.server_version }} + + - name: Update Commit Hash in Syft + run: | + python packages/syft/src/syft/util/update_commit.py packages/syft/src/syft/util/commit.py + + - name: Build Helm Chart + shell: bash + run: | + # install k3d + K3D_VERSION=v5.6.3 + wget https://github.com/k3d-io/k3d/releases/download/${K3D_VERSION}/k3d-linux-amd64 + mv k3d-linux-amd64 k3d + chmod +x k3d + export PATH=`pwd`:$PATH + k3d version + + #Install Devspace + DEVSPACE_VERSION=v6.3.12 + curl -sSL https://github.com/loft-sh/devspace/releases/download/${DEVSPACE_VERSION}/devspace-linux-amd64 -o ./devspace + chmod +x devspace + devspace version + + # Install helm + curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash + helm version + + tox -e syft.build.helm + tox -e syft.package.helm + + - name: Linting + run: | + tox -e lint || true + + - name: Manual Build and Publish + run: | + tox -e syft.publish + if [[ "${{ github.event.inputs.release_platform }}" == "TEST_PYPI" ]]; then + twine upload -r testpypi -u __token__ -p ${{ secrets.OM_SYFT_TEST_PYPI_TOKEN }} packages/syft/dist/* + fi + + # Checkout to gh-pages and update helm repo + - name: Checkout to gh-pages + uses: actions/checkout@v4 + with: + ref: gh-pages + token: ${{ secrets.SYFT_BOT_COMMIT_TOKEN }} + path: ghpages + + - name: Copy helm repo files from Syft Repo + run: | + rm -rf ghpages/helm/* + cp -R packages/grid/helm/repo/. 
ghpages/helm/ + - name: Commit changes to gh-pages + uses: EndBug/add-and-commit@v9 + with: + author_name: ${{ secrets.OM_BOT_NAME }} + author_email: ${{ secrets.OM_BOT_EMAIL }} + message: "Update Helm package from Syft Repo" + add: "helm/" + push: "origin gh-pages" + cwd: "./ghpages/" diff --git a/.github/workflows/cd-syft-dev.yml b/.github/workflows/cd-syft-dev.yml index b28510a2906..0b9ce3fe27d 100644 --- a/.github/workflows/cd-syft-dev.yml +++ b/.github/workflows/cd-syft-dev.yml @@ -77,11 +77,11 @@ jobs: username: ${{ secrets.ACR_USERNAME }} password: ${{ secrets.ACR_PASSWORD }} - - name: Set Grid package version - id: grid + - name: Set Server package version + id: server shell: bash run: | - echo "GRID_VERSION=$(python packages/grid/VERSION)" >> $GITHUB_OUTPUT + echo "SERVER_VERSION=$(python packages/grid/VERSION)" >> $GITHUB_OUTPUT - name: Build and push `syft` image to registry uses: docker/build-push-action@v6 @@ -92,9 +92,9 @@ tags: | ${{ secrets.ACR_SERVER }}/openmined/syft-client:dev ${{ secrets.ACR_SERVER }}/openmined/syft-client:dev-${{ github.sha }} - ${{ secrets.ACR_SERVER }}/openmined/syft-client:${{ steps.grid.outputs.GRID_VERSION }} + ${{ secrets.ACR_SERVER }}/openmined/syft-client:${{ steps.server.outputs.SERVER_VERSION }} - - name: Build and push `grid-backend` image to registry + - name: Build and push `syft-backend` image to registry uses: docker/build-push-action@v6 with: context: ./packages @@ -102,43 +102,43 @@ push: true target: backend tags: | - ${{ secrets.ACR_SERVER }}/openmined/grid-backend:dev - ${{ secrets.ACR_SERVER }}/openmined/grid-backend:dev-${{ github.sha }} - ${{ secrets.ACR_SERVER }}/openmined/grid-backend:${{ steps.grid.outputs.GRID_VERSION }} + ${{ secrets.ACR_SERVER }}/openmined/syft-backend:dev + ${{ secrets.ACR_SERVER }}/openmined/syft-backend:dev-${{ github.sha }} + ${{ secrets.ACR_SERVER }}/openmined/syft-backend:${{ steps.server.outputs.SERVER_VERSION }} - - name: Build and push `grid-frontend` image to registry + - name: Build and push `syft-frontend` image to registry uses: docker/build-push-action@v6 with: context: ./packages/grid/frontend file: ./packages/grid/frontend/frontend.dockerfile push: true tags: | - ${{ secrets.ACR_SERVER }}/openmined/grid-frontend:dev - ${{ secrets.ACR_SERVER }}/openmined/grid-frontend:dev-${{ github.sha }} - ${{ secrets.ACR_SERVER }}/openmined/grid-frontend:${{ steps.grid.outputs.GRID_VERSION }} - target: grid-ui-development + ${{ secrets.ACR_SERVER }}/openmined/syft-frontend:dev + ${{ secrets.ACR_SERVER }}/openmined/syft-frontend:dev-${{ github.sha }} + ${{ secrets.ACR_SERVER }}/openmined/syft-frontend:${{ steps.server.outputs.SERVER_VERSION }} + target: syft-ui-development - - name: Build and push `grid-seaweedfs` image to registry + - name: Build and push `syft-seaweedfs` image to registry uses: docker/build-push-action@v6 with: context: ./packages/grid/seaweedfs file: ./packages/grid/seaweedfs/seaweedfs.dockerfile push: true tags: | - ${{ secrets.ACR_SERVER }}/openmined/grid-seaweedfs:dev - ${{ secrets.ACR_SERVER }}/openmined/grid-seaweedfs:dev-${{ github.sha }} - ${{ secrets.ACR_SERVER }}/openmined/grid-seaweedfs:${{ steps.grid.outputs.GRID_VERSION }} + ${{ secrets.ACR_SERVER }}/openmined/syft-seaweedfs:dev + ${{ secrets.ACR_SERVER }}/openmined/syft-seaweedfs:dev-${{ github.sha }} + ${{ secrets.ACR_SERVER }}/openmined/syft-seaweedfs:${{ steps.server.outputs.SERVER_VERSION }} - - name: Build and push `grid-enclave-attestation` image to registry + - name: Build and push `syft-enclave-attestation` image to registry uses: docker/build-push-action@v6 with: context: ./packages/grid/enclave/attestation file: ./packages/grid/enclave/attestation/attestation.dockerfile push: true tags: | - ${{ secrets.ACR_SERVER }}/openmined/grid-enclave-attestation:dev - ${{ secrets.ACR_SERVER }}/openmined/grid-enclave-attestation:dev-${{ github.sha }} - ${{ secrets.ACR_SERVER }}/openmined/grid-enclave-attestation:${{ steps.grid.outputs.GRID_VERSION }} + ${{ secrets.ACR_SERVER }}/openmined/syft-enclave-attestation:dev + ${{ secrets.ACR_SERVER }}/openmined/syft-enclave-attestation:dev-${{ github.sha }} + ${{ secrets.ACR_SERVER }}/openmined/syft-enclave-attestation:${{ steps.server.outputs.SERVER_VERSION }} - name: Build Helm Chart & Copy to infra if: github.ref == 'refs/heads/dev' || github.event.inputs.deploy-helm == 'true'
diff --git a/.github/workflows/cd-syft.yml b/.github/workflows/cd-syft.yml index d0610ac7956..97d22f442ff 100644 --- a/.github/workflows/cd-syft.yml +++ b/.github/workflows/cd-syft.yml @@ -12,8 +12,8 @@ on: default: "false" type: choice options: - - false - - true + - "false" + - "true" release_platform: description: "Release Platform" @@ -90,7 +90,7 @@ jobs: outputs: release_tag: ${{ steps.get_release_tag.outputs.release_tag }} - grid_version: ${{ steps.release_metadata.outputs.grid_version }} + server_version: ${{ steps.release_metadata.outputs.server_version }} steps: - uses: actions/checkout@v4 @@ -166,7 +166,7 @@ echo "release_platform=linux/arm64" >> $GITHUB_OUTPUT echo "short_release_platform=arm64" >> $GITHUB_OUTPUT fi - echo "grid_version=$(python packages/grid/VERSION)" >> $GITHUB_OUTPUT + echo "server_version=$(python packages/grid/VERSION)" >> $GITHUB_OUTPUT # TODO: Optimize redundant bump protocol version checks - name: Check and Bump Protocol Version @@ -185,79 +185,79 @@ username: ${{ secrets.DOCKER_LOGIN }} password: ${{ secrets.DOCKER_PASSWORD }} - - name: Build and push `grid-backend` image to DockerHub - id: grid-backend-build + - name: Build and push `syft-backend` image to DockerHub + id: syft-backend-build uses: docker/build-push-action@v6 with: context: ./packages file: ./packages/grid/backend/backend.dockerfile platforms: ${{ steps.release_metadata.outputs.release_platform }} target: backend - outputs: type=image,name=openmined/grid-backend,push-by-digest=true,name-canonical=true,push=true - cache-from: type=registry,ref=openmined/grid-backend:cache-${{ steps.release_metadata.outputs.short_release_platform }} - cache-to: type=registry,ref=openmined/grid-backend:cache-${{ steps.release_metadata.outputs.short_release_platform }},mode=max + outputs: type=image,name=openmined/syft-backend,push-by-digest=true,name-canonical=true,push=true + cache-from: type=registry,ref=openmined/syft-backend:cache-${{ steps.release_metadata.outputs.short_release_platform }} + cache-to: type=registry,ref=openmined/syft-backend:cache-${{ steps.release_metadata.outputs.short_release_platform }},mode=max - - name: Export digest for grid-backend + - name: Export digest for syft-backend run: | - mkdir -p /tmp/digests/grid-backend - digest="${{ steps.grid-backend-build.outputs.digest }}" - touch "/tmp/digests/grid-backend/${digest#sha256:}" + mkdir -p /tmp/digests/syft-backend + digest="${{ steps.syft-backend-build.outputs.digest }}" + touch "/tmp/digests/syft-backend/${digest#sha256:}" - - name: Build and push `grid-frontend` image to DockerHub - id: grid-frontend-build + - name: Build and push `syft-frontend` image to DockerHub + id: syft-frontend-build uses:
docker/build-push-action@v6 with: context: ./packages/grid/frontend file: ./packages/grid/frontend/frontend.dockerfile platforms: ${{ steps.release_metadata.outputs.release_platform }} - outputs: type=image,name=openmined/grid-frontend,push-by-digest=true,name-canonical=true,push=true - target: grid-ui-development - cache-from: type=registry,ref=openmined/grid-frontend:cache-${{ steps.release_metadata.outputs.short_release_platform }} - cache-to: type=registry,ref=openmined/grid-frontend:cache-${{ steps.release_metadata.outputs.short_release_platform}},mode=max + outputs: type=image,name=openmined/syft-frontend,push-by-digest=true,name-canonical=true,push=true + target: syft-ui-development + cache-from: type=registry,ref=openmined/syft-frontend:cache-${{ steps.release_metadata.outputs.short_release_platform }} + cache-to: type=registry,ref=openmined/syft-frontend:cache-${{ steps.release_metadata.outputs.short_release_platform}},mode=max - - name: Export digest for grid-frontend + - name: Export digest for syft-frontend run: | - mkdir -p /tmp/digests/grid-frontend - digest="${{ steps.grid-frontend-build.outputs.digest }}" - touch "/tmp/digests/grid-frontend/${digest#sha256:}" + mkdir -p /tmp/digests/syft-frontend + digest="${{ steps.syft-frontend-build.outputs.digest }}" + touch "/tmp/digests/syft-frontend/${digest#sha256:}" - - name: Build and push `grid-seaweedfs` image to DockerHub - id: grid-seaweedfs-build + - name: Build and push `syft-seaweedfs` image to DockerHub + id: syft-seaweedfs-build uses: docker/build-push-action@v6 with: context: ./packages/grid/seaweedfs file: ./packages/grid/seaweedfs/seaweedfs.dockerfile platforms: ${{ steps.release_metadata.outputs.release_platform }} - outputs: type=image,name=openmined/grid-seaweedfs,push-by-digest=true,name-canonical=true,push=true - cache-from: type=registry,ref=openmined/grid-seaweedfs:cache-${{ steps.release_metadata.outputs.short_release_platform }} - cache-to: type=registry,ref=openmined/grid-seaweedfs:cache-${{ steps.release_metadata.outputs.short_release_platform}},mode=max + outputs: type=image,name=openmined/syft-seaweedfs,push-by-digest=true,name-canonical=true,push=true + cache-from: type=registry,ref=openmined/syft-seaweedfs:cache-${{ steps.release_metadata.outputs.short_release_platform }} + cache-to: type=registry,ref=openmined/syft-seaweedfs:cache-${{ steps.release_metadata.outputs.short_release_platform}},mode=max - - name: Export digest for grid-seaweedfs + - name: Export digest for syft-seaweedfs run: | - mkdir -p /tmp/digests/grid-seaweedfs - digest="${{ steps.grid-seaweedfs-build.outputs.digest }}" - touch "/tmp/digests/grid-seaweedfs/${digest#sha256:}" + mkdir -p /tmp/digests/syft-seaweedfs + digest="${{ steps.syft-seaweedfs-build.outputs.digest }}" + touch "/tmp/digests/syft-seaweedfs/${digest#sha256:}" - # Some of the dependencies of grid-enclave-attestation are not available for arm64 - # Hence, we are building grid-enclave-attestation only for x64 (see the `if` conditional) - - name: Build and push `grid-enclave-attestation` image to DockerHub + # Some of the dependencies of syft-enclave-attestation are not available for arm64 + # Hence, we are building syft-enclave-attestation only for x64 (see the `if` conditional) + - name: Build and push `syft-enclave-attestation` image to DockerHub if: ${{ endsWith(matrix.runner, '-x64') }} - id: grid-enclave-attestation-build + id: syft-enclave-attestation-build uses: docker/build-push-action@v6 with: context: ./packages/grid/enclave/attestation file: 
./packages/grid/enclave/attestation/attestation.dockerfile platforms: ${{ steps.release_metadata.outputs.release_platform }} - outputs: type=image,name=openmined/grid-enclave-attestation,push-by-digest=true,name-canonical=true,push=true - cache-from: type=registry,ref=openmined/grid-enclave-attestation:cache-${{ steps.release_metadata.outputs.short_release_platform }} - cache-to: type=registry,ref=openmined/grid-enclave-attestation:cache-${{ steps.release_metadata.outputs.short_release_platform}},mode=max + outputs: type=image,name=openmined/syft-enclave-attestation,push-by-digest=true,name-canonical=true,push=true + cache-from: type=registry,ref=openmined/syft-enclave-attestation:cache-${{ steps.release_metadata.outputs.short_release_platform }} + cache-to: type=registry,ref=openmined/syft-enclave-attestation:cache-${{ steps.release_metadata.outputs.short_release_platform}},mode=max - - name: Export digest for grid-enclave-attestation + - name: Export digest for syft-enclave-attestation if: ${{ endsWith(matrix.runner, '-x64') }} run: | - mkdir -p /tmp/digests/grid-enclave-attestation - digest="${{ steps.grid-enclave-attestation-build.outputs.digest }}" - touch "/tmp/digests/grid-enclave-attestation/${digest#sha256:}" + mkdir -p /tmp/digests/syft-enclave-attestation + digest="${{ steps.syft-enclave-attestation-build.outputs.digest }}" + touch "/tmp/digests/syft-enclave-attestation/${digest#sha256:}" - name: Build and push `syft` image to registry id: syft-build @@ -279,7 +279,7 @@ jobs: - name: Upload digests uses: actions/upload-artifact@v4 with: - name: digests-${{ steps.release_metadata.outputs.grid_version }}-${{ steps.release_metadata.outputs.short_release_platform }} + name: digests-${{ steps.release_metadata.outputs.server_version }}-${{ steps.release_metadata.outputs.short_release_platform }} path: /tmp/digests/* if-no-files-found: error retention-days: 1 @@ -299,7 +299,7 @@ jobs: uses: actions/download-artifact@v4 with: path: /tmp/digests - pattern: digests-${{ needs.build-and-push-docker-images.outputs.grid_version }}-* + pattern: digests-${{ needs.build-and-push-docker-images.outputs.server_version }}-* merge-multiple: true - name: Set up Docker Buildx @@ -311,43 +311,43 @@ jobs: username: ${{ secrets.DOCKER_LOGIN }} password: ${{ secrets.DOCKER_PASSWORD }} - - name: Create manifest list and push for grid-backend - working-directory: /tmp/digests/grid-backend + - name: Create manifest list and push for syft-backend + working-directory: /tmp/digests/syft-backend run: | docker buildx imagetools create \ - -t openmined/grid-backend:${{ needs.build-and-push-docker-images.outputs.grid_version }} \ - -t openmined/grid-backend:${{ needs.build-and-push-docker-images.outputs.release_tag }} \ - $(printf 'openmined/grid-backend@sha256:%s ' *) + -t openmined/syft-backend:${{ needs.build-and-push-docker-images.outputs.server_version }} \ + -t openmined/syft-backend:${{ needs.build-and-push-docker-images.outputs.release_tag }} \ + $(printf 'openmined/syft-backend@sha256:%s ' *) - - name: Create manifest list and push for grid-frontend - working-directory: /tmp/digests/grid-frontend + - name: Create manifest list and push for syft-frontend + working-directory: /tmp/digests/syft-frontend run: | docker buildx imagetools create \ - -t openmined/grid-frontend:${{ needs.build-and-push-docker-images.outputs.grid_version }} \ - -t openmined/grid-frontend:${{ needs.build-and-push-docker-images.outputs.release_tag }} \ - $(printf 'openmined/grid-frontend@sha256:%s ' *) + -t openmined/syft-frontend:${{ 
needs.build-and-push-docker-images.outputs.server_version }} \ + -t openmined/syft-frontend:${{ needs.build-and-push-docker-images.outputs.release_tag }} \ + $(printf 'openmined/syft-frontend@sha256:%s ' *) - - name: Create manifest list and push for grid-seaweedfs - working-directory: /tmp/digests/grid-seaweedfs + - name: Create manifest list and push for syft-seaweedfs + working-directory: /tmp/digests/syft-seaweedfs run: | docker buildx imagetools create \ - -t openmined/grid-seaweedfs:${{ needs.build-and-push-docker-images.outputs.grid_version }} \ - -t openmined/grid-seaweedfs:${{ needs.build-and-push-docker-images.outputs.release_tag }} \ - $(printf 'openmined/grid-seaweedfs@sha256:%s ' *) + -t openmined/syft-seaweedfs:${{ needs.build-and-push-docker-images.outputs.server_version }} \ + -t openmined/syft-seaweedfs:${{ needs.build-and-push-docker-images.outputs.release_tag }} \ + $(printf 'openmined/syft-seaweedfs@sha256:%s ' *) - - name: Create manifest list and push for grid-enclave-attestation - working-directory: /tmp/digests/grid-enclave-attestation + - name: Create manifest list and push for syft-enclave-attestation + working-directory: /tmp/digests/syft-enclave-attestation run: | docker buildx imagetools create \ - -t openmined/grid-enclave-attestation:${{ needs.build-and-push-docker-images.outputs.grid_version }} \ - -t openmined/grid-enclave-attestation:${{ needs.build-and-push-docker-images.outputs.release_tag }} \ - $(printf 'openmined/grid-enclave-attestation@sha256:%s ' *) + -t openmined/syft-enclave-attestation:${{ needs.build-and-push-docker-images.outputs.server_version }} \ + -t openmined/syft-enclave-attestation:${{ needs.build-and-push-docker-images.outputs.release_tag }} \ + $(printf 'openmined/syft-enclave-attestation@sha256:%s ' *) - name: Create manifest list and push for syft working-directory: /tmp/digests/syft run: | docker buildx imagetools create \ - -t openmined/syft-client:${{ needs.build-and-push-docker-images.outputs.grid_version }} \ + -t openmined/syft-client:${{ needs.build-and-push-docker-images.outputs.server_version }} \ -t openmined/syft-client:${{ needs.build-and-push-docker-images.outputs.release_tag }} \ $(printf 'openmined/syft-client@sha256:%s ' *) diff --git a/.github/workflows/e2e-tests-notebook.yml b/.github/workflows/e2e-tests-notebook.yml index deef4da0680..2b9f466933c 100644 --- a/.github/workflows/e2e-tests-notebook.yml +++ b/.github/workflows/e2e-tests-notebook.yml @@ -7,12 +7,12 @@ on: description: "Syft version to test" required: true type: string - node_url: - description: "Node URL to use" + server_url: + description: "Server URL to use" required: true type: string - node_port: - description: "Node port" + server_port: + description: "Server port" required: true type: number exclude_notebooks: @@ -26,12 +26,12 @@ on: description: "Syft version to test" required: true type: string - node_url: - description: "Node URL to use" + server_url: + description: "Server URL to use" required: true type: string - node_port: - description: "Node port" + server_port: + description: "Server port" required: true type: number exclude_notebooks: @@ -84,8 +84,8 @@ jobs: - name: Run Notebook tests env: SYFT_VERSION: ${{ inputs.syft_version }} - NODE_URL: ${{ inputs.node_url }} - NODE_PORT: ${{ inputs.node_port }} + SERVER_URL: ${{ inputs.server_url }} + SERVER_PORT: ${{ inputs.server_port }} EXCLUDE_NOTEBOOKS: ${{ inputs.exclude_notebooks }} run: | tox -e e2e.test.notebook diff --git a/.github/workflows/pr-tests-stack.yml 
b/.github/workflows/pr-tests-stack.yml index d94230d155e..d3d42279e85 100644 --- a/.github/workflows/pr-tests-stack.yml +++ b/.github/workflows/pr-tests-stack.yml @@ -60,7 +60,9 @@ jobs: if: steps.changes.outputs.stack == 'true' timeout-minutes: 60 run: | - tox -e backend.test.basecpu + echo "Skipping pr image test" + # run: | + # tox -e backend.test.basecpu pr-tests-syft-integration: strategy: @@ -68,7 +70,7 @@ jobs: matrix: os: [ubuntu-latest] python-version: ["3.12"] - pytest-modules: ["local_node"] + pytest-modules: ["local_server"] fail-fast: false runs-on: ${{matrix.os}} @@ -252,9 +254,9 @@ jobs: run: | mkdir -p ./k8s-logs kubectl describe all -A --context k3d-test-gateway-1 --namespace syft > ./k8s-logs/test-gateway-1-desc-${{ steps.date.outputs.date }}.txt - kubectl describe all -A --context k3d-test-domain-1 --namespace syft > ./k8s-logs/test-domain-1-desc-${{ steps.date.outputs.date }}.txt + kubectl describe all -A --context k3d-test-datasite-1 --namespace syft > ./k8s-logs/test-datasite-1-desc-${{ steps.date.outputs.date }}.txt kubectl logs -l app.kubernetes.io/name!=random --prefix=true --context k3d-test-gateway-1 --namespace syft > ./k8s-logs/test-gateway-1-logs-${{ steps.date.outputs.date }}.txt - kubectl logs -l app.kubernetes.io/name!=random --prefix=true --context k3d-test-domain-1 --namespace syft > ./k8s-logs/test-domain-1-logs-${{ steps.date.outputs.date }}.txt + kubectl logs -l app.kubernetes.io/name!=random --prefix=true --context k3d-test-datasite-1 --namespace syft > ./k8s-logs/test-datasite-1-logs-${{ steps.date.outputs.date }}.txt ls -la ./k8s-logs - name: Upload logs to GitHub @@ -270,7 +272,7 @@ jobs: run: | export PATH=`pwd`:$PATH k3d cluster delete test-gateway-1 || true - k3d cluster delete test-domain-1 || true + k3d cluster delete test-datasite-1 || true k3d registry delete k3d-registry.localhost || true pr-tests-notebook-k8s: @@ -395,9 +397,9 @@ jobs: run: | mkdir -p ./k8s-logs kubectl describe all -A --context k3d-test-gateway-1 --namespace syft > ./k8s-logs/test-gateway-1-desc-${{ steps.date.outputs.date }}.txt - kubectl describe all -A --context k3d-test-domain-1 --namespace syft > ./k8s-logs/test-domain-1-desc-${{ steps.date.outputs.date }}.txt + kubectl describe all -A --context k3d-test-datasite-1 --namespace syft > ./k8s-logs/test-datasite-1-desc-${{ steps.date.outputs.date }}.txt kubectl logs -l app.kubernetes.io/name!=random --prefix=true --context k3d-test-gateway-1 --namespace syft > ./k8s-logs/test-gateway-1-logs-${{ steps.date.outputs.date }}.txt - kubectl logs -l app.kubernetes.io/name!=random --prefix=true --context k3d-test-domain-1 --namespace syft > ./k8s-logs/test-domain-1-logs-${{ steps.date.outputs.date }}.txt + kubectl logs -l app.kubernetes.io/name!=random --prefix=true --context k3d-test-datasite-1 --namespace syft > ./k8s-logs/test-datasite-1-logs-${{ steps.date.outputs.date }}.txt ls -la ./k8s-logs - name: Upload logs to GitHub @@ -413,10 +415,11 @@ jobs: run: | export PATH=`pwd`:$PATH k3d cluster delete test-gateway-1 || true - k3d cluster delete test-domain-1 || true + k3d cluster delete test-datasite-1 || true k3d registry delete k3d-registry.localhost || true pr-tests-migrations: + if: false strategy: max-parallel: 99 matrix: diff --git a/.isort.cfg b/.isort.cfg index 5c9360d3549..aeb09bb8f36 100644 --- a/.isort.cfg +++ b/.isort.cfg @@ -2,18 +2,18 @@ profile=black force_single_line=True known_syft=syft -known_grid=grid +known_server=grid known_syftcli=syftcli known_first_party=src remove_redundant_aliases=True 
-sections=FUTURE,STDLIB,THIRDPARTY,SYFT,GRID,SYFTCLI,FIRSTPARTY,LOCALFOLDER +sections=FUTURE,STDLIB,THIRDPARTY,SYFT,SERVER,SYFTCLI,FIRSTPARTY,LOCALFOLDER lines_between_types=0 force_sort_within_sections=True import_heading_future=future import_heading_stdlib=stdlib import_heading_thirdparty=third party import_heading_syft=syft absolute -import_heading_grid=grid absolute +import_heading_server=server absolute import_heading_syftcli=syftcli absolute import_heading_firstparty=first party import_heading_localfolder=relative diff --git a/README.md b/README.md index 8c602a27f0f..b89130201c7 100644 --- a/README.md +++ b/README.md @@ -23,9 +23,9 @@ $ pip install -U syft[data_science] ```python # from Jupyter / Python import syft as sy -sy.requires(">=0.8.6,<0.8.7") -node = sy.orchestra.launch( - name="my-domain", +sy.requires(">=0.8.7,<0.8.8") +server = sy.orchestra.launch( + name="my-datasite", port=8080, create_producer=True, n_consumers=1, @@ -36,9 +36,9 @@ node = sy.orchestra.launch( ```bash # or from the command line -$ syft launch --name=my-domain --port=8080 --reset=True +$ syft launch --name=my-datasite --port=8080 --reset=True -Starting syft-node server on 0.0.0.0:8080 +Starting syft-datasite server on 0.0.0.0:8080 ``` ## Launch Client @@ -46,7 +46,7 @@ Starting syft-node server on 0.0.0.0:8080 ```python import syft as sy sy.requires(">=0.8.6,<0.8.7") -domain_client = sy.login( +datasite_client = sy.login( port=8080, email="info@openmined.org", password="changethis" @@ -64,7 +64,7 @@ domain_client = sy.login( - 04-pytorch-example.ipynb - 05-custom-policy.ipynb - 06-multiple-code-requests.ipynb -- 07-domain-register-control-flow.ipynb +- 07-datasite-register-control-flow.ipynb - 08-code-version.ipynb - 09-blob-storage.ipynb - 10-container-images.ipynb @@ -104,7 +104,7 @@ SYFT_VERSION="" #### 4. Provisioning Helm Charts ```sh -helm install my-domain openmined/syft --version $SYFT_VERSION --namespace syft --create-namespace --set ingress.className="traefik" +helm install my-datasite openmined/syft --version $SYFT_VERSION --namespace syft --create-namespace --set ingress.className="traefik" ``` ### Ingress Controllers @@ -141,16 +141,17 @@ helm install ... --set ingress.class="gce" # Install Notes - PySyft 0.8.6 Requires: 🐍 `python 3.10 - 3.12` - Run: `pip install -U syft` -- PyGrid Requires: 🐳 `docker` or ☸️ `kubernetes` +- Syft Server Requires: 🐳 `docker` or ☸️ `kubernetes` # Versions `0.9.0` - Coming soon... -`0.8.7` (Beta) - `dev` branch 👈🏽 API - Coming soon... -`0.8.6` (Stable) - API +`0.8.8` (Beta) - `dev` branch 👈🏽 API - Coming soon... +`0.8.7` (Stable) - API Deprecated: +- `0.8.6` - API - `0.8.5-post.2` - API - `0.8.4` - API - `0.8.3` - API @@ -162,7 +163,7 @@ Deprecated: - `0.5.1` - Course 2 + M1 Hotfix - `0.2.0` - `0.5.0` -PySyft and PyGrid use the same `version` and its best to match them up where possible. We release weekly betas which can be used in each context: +PySyft and Syft Server use the same `version` and its best to match them up where possible. We release weekly betas which can be used in each context: PySyft (Stable): `pip install -U syft` @@ -179,9 +180,9 @@ PySyft (Beta): `pip install -U syft --pre` ### Why should I use Syft? -`Syft` allows a `Data Scientist` to ask `questions` about a `dataset` and, within `privacy limits` set by the `data owner`, get `answers` to those `questions`, all without obtaining a `copy` of the data itself. We call this process `Remote Data Science`. 
It means in a wide variety of `domains` across society, the current `risks` of sharing information (`copying` data) with someone such as, privacy invasion, IP theft and blackmail will no longer prevent the vast `benefits` such as innovation, insights and scientific discovery which secure access will provide. +`Syft` allows a `Data Scientist` to ask `questions` about a `dataset` and, within `privacy limits` set by the `data owner`, get `answers` to those `questions`, all without obtaining a `copy` of the data itself. We call this process `Remote Data Science`. It means in a wide variety of `datasites` across society, the current `risks` of sharing information (`copying` data) with someone such as, privacy invasion, IP theft and blackmail will no longer prevent the vast `benefits` such as innovation, insights and scientific discovery which secure access will provide. -No more cold calls to get `access` to a dataset. No more weeks of `wait times` to get a `result` on your `query`. It also means `1000x more data` in every domain. PySyft opens the doors to a streamlined Data Scientist `workflow`, all with the individual's `privacy` at its heart. +No more cold calls to get `access` to a dataset. No more weeks of `wait times` to get a `result` on your `query`. It also means `1000x more data` in every datasite. PySyft opens the doors to a streamlined Data Scientist `workflow`, all with the individual's `privacy` at its heart. -Provides services to a group of `Data Owners` and `Data Scientists`, such as dataset `search` and bulk `project approval` (legal / technical) to participate in a project. A gateway server acts as a bridge between it's members (`Domains`) and their subscribers (`Data Scientists`) and can provide access to a collection of `domains` at once. +Provides services to a group of `Data Owners` and `Data Scientists`, such as dataset `search` and bulk `project approval` (legal / technical) to participate in a project. A gateway server acts as a bridge between it's members (`Datasites`) and their subscribers (`Data Scientists`) and can provide access to a collection of `datasites` at once. 
diff --git a/VERSION b/VERSION index df1dec5602a..ab18b3a5a42 100644 --- a/VERSION +++ b/VERSION @@ -1,5 +1,5 @@ # Mono Repo Global Version -__version__ = "0.8.7-beta.13" +__version__ = "0.8.8-beta.2" # elsewhere we can call this file: `python VERSION` and simply take the stdout # stdlib diff --git a/docs/source/_static/install_tutorials/pygrid_ui.png b/docs/source/_static/install_tutorials/pygrid_ui.png deleted file mode 100644 index a31db3d6111..00000000000 Binary files a/docs/source/_static/install_tutorials/pygrid_ui.png and /dev/null differ diff --git a/docs/source/_static/personas-image/data-owner/00-deploy-domain-00.gif b/docs/source/_static/personas-image/data-owner/00-deploy-datasite-00.gif similarity index 100% rename from docs/source/_static/personas-image/data-owner/00-deploy-domain-00.gif rename to docs/source/_static/personas-image/data-owner/00-deploy-datasite-00.gif diff --git a/docs/source/_static/personas-image/data-owner/00-deploy-domain-01.jpg b/docs/source/_static/personas-image/data-owner/00-deploy-datasite-01.jpg similarity index 100% rename from docs/source/_static/personas-image/data-owner/00-deploy-domain-01.jpg rename to docs/source/_static/personas-image/data-owner/00-deploy-datasite-01.jpg diff --git a/docs/source/api_reference/index.rst b/docs/source/api_reference/index.rst index 7d7e85d02a8..d87a01b35e9 100644 --- a/docs/source/api_reference/index.rst +++ b/docs/source/api_reference/index.rst @@ -22,7 +22,7 @@ objects, functions and methods. syft.client syft.external - syft.node + syft.server syft.serde syft.service syft.store diff --git a/docs/source/api_reference/syft.client.api.rst b/docs/source/api_reference/syft.client.api.rst index 9e7889482c6..3022a1c5086 100644 --- a/docs/source/api_reference/syft.client.api.rst +++ b/docs/source/api_reference/syft.client.api.rst @@ -27,7 +27,7 @@ syft.client.api APIEndpoint APIModule APIRegistry - NodeView + ServerView SignedSyftAPICall SyftAPI SyftAPICall diff --git a/docs/source/api_reference/syft.client.connection.rst b/docs/source/api_reference/syft.client.connection.rst index 912c3b50596..139d9eee60e 100644 --- a/docs/source/api_reference/syft.client.connection.rst +++ b/docs/source/api_reference/syft.client.connection.rst @@ -17,7 +17,7 @@ syft.client.connection .. autosummary:: - NodeConnection + ServerConnection diff --git a/docs/source/api_reference/syft.client.registry.rst b/docs/source/api_reference/syft.client.registry.rst index 57f0136d312..9b2987bfb78 100644 --- a/docs/source/api_reference/syft.client.registry.rst +++ b/docs/source/api_reference/syft.client.registry.rst @@ -17,7 +17,7 @@ syft.client.registry .. autosummary:: - DomainRegistry + DatasiteRegistry NetworkRegistry diff --git a/docs/source/api_reference/syft.node.credentials.rst b/docs/source/api_reference/syft.node.credentials.rst index a1888491bcc..99e54dd153e 100644 --- a/docs/source/api_reference/syft.node.credentials.rst +++ b/docs/source/api_reference/syft.node.credentials.rst @@ -1,7 +1,7 @@ -syft.node.credentials +syft.server.credentials ===================== -.. automodule:: syft.node.credentials +.. 
automodule:: syft.server.credentials diff --git a/docs/source/api_reference/syft.node.domain.rst b/docs/source/api_reference/syft.node.datasite.rst similarity index 63% rename from docs/source/api_reference/syft.node.domain.rst rename to docs/source/api_reference/syft.node.datasite.rst index eb4ff6334a5..687eb004816 100644 --- a/docs/source/api_reference/syft.node.domain.rst +++ b/docs/source/api_reference/syft.node.datasite.rst @@ -1,7 +1,7 @@ -syft.node.domain +syft.server.datasite ================ -.. automodule:: syft.node.domain +.. automodule:: syft.server.datasite @@ -17,7 +17,7 @@ syft.node.domain .. autosummary:: - Domain + Datasite diff --git a/docs/source/api_reference/syft.node.gateway.rst b/docs/source/api_reference/syft.node.gateway.rst index 410841b9447..8b6d6335fd1 100644 --- a/docs/source/api_reference/syft.node.gateway.rst +++ b/docs/source/api_reference/syft.node.gateway.rst @@ -1,7 +1,7 @@ -syft.node.gateway +syft.server.gateway ================= -.. automodule:: syft.node.gateway +.. automodule:: syft.server.gateway diff --git a/docs/source/api_reference/syft.node.node.rst b/docs/source/api_reference/syft.node.node.rst index 1c71dd53574..e3f263f97d0 100644 --- a/docs/source/api_reference/syft.node.node.rst +++ b/docs/source/api_reference/syft.node.node.rst @@ -1,7 +1,7 @@ -syft.node.node +syft.server.server ============== -.. automodule:: syft.node.node +.. automodule:: syft.server.server @@ -18,7 +18,7 @@ syft.node.node get_default_root_email get_default_root_password get_env - get_node_uid_env + get_server_uid_env get_private_key_env gipc_decoder gipc_encoder @@ -35,7 +35,7 @@ syft.node.node .. autosummary:: - Node + Server diff --git a/docs/source/api_reference/syft.node.routes.rst b/docs/source/api_reference/syft.node.routes.rst index a5bce85bd46..926814c5b62 100644 --- a/docs/source/api_reference/syft.node.routes.rst +++ b/docs/source/api_reference/syft.node.routes.rst @@ -1,7 +1,7 @@ -syft.node.routes +syft.server.routes ================ -.. automodule:: syft.node.routes +.. automodule:: syft.server.routes diff --git a/docs/source/api_reference/syft.node.rst b/docs/source/api_reference/syft.node.rst index a94e20d906d..183e2e7f281 100644 --- a/docs/source/api_reference/syft.node.rst +++ b/docs/source/api_reference/syft.node.rst @@ -1,7 +1,7 @@ -syft.node +syft.server ========= -.. automodule:: syft.node +.. automodule:: syft.server @@ -27,13 +27,13 @@ :toctree: :recursive: - syft.node.credentials - syft.node.domain - syft.node.gateway - syft.node.node - syft.node.routes - syft.node.run - syft.node.server - syft.node.worker - syft.node.worker_settings + syft.server.credentials + syft.server.datasite + syft.server.gateway + syft.server.server + syft.server.routes + syft.server.run + syft.server.server + syft.server.worker + syft.server.worker_settings diff --git a/docs/source/api_reference/syft.node.run.rst b/docs/source/api_reference/syft.node.run.rst index 3f8cc943294..588a53322d9 100644 --- a/docs/source/api_reference/syft.node.run.rst +++ b/docs/source/api_reference/syft.node.run.rst @@ -1,7 +1,7 @@ -syft.node.run +syft.server.run ============= -.. automodule:: syft.node.run +.. automodule:: syft.server.run diff --git a/docs/source/api_reference/syft.node.server.rst b/docs/source/api_reference/syft.node.server.rst index efcc7d9642a..1484aee73f4 100644 --- a/docs/source/api_reference/syft.node.server.rst +++ b/docs/source/api_reference/syft.node.server.rst @@ -1,7 +1,7 @@ -syft.node.server +syft.server.server ================ -.. automodule:: syft.node.server +.. 
automodule:: syft.server.server @@ -17,7 +17,7 @@ syft.node.server kill_process make_app run_uvicorn - serve_node + serve_server diff --git a/docs/source/api_reference/syft.node.worker.rst b/docs/source/api_reference/syft.node.worker.rst index 510732d7c39..f2da3a94e81 100644 --- a/docs/source/api_reference/syft.node.worker.rst +++ b/docs/source/api_reference/syft.node.worker.rst @@ -1,7 +1,7 @@ -syft.node.worker +syft.server.worker ================ -.. automodule:: syft.node.worker +.. automodule:: syft.server.worker diff --git a/docs/source/api_reference/syft.node.worker_settings.rst b/docs/source/api_reference/syft.node.worker_settings.rst index 0b5521ddaef..a9880847ec0 100644 --- a/docs/source/api_reference/syft.node.worker_settings.rst +++ b/docs/source/api_reference/syft.node.worker_settings.rst @@ -1,7 +1,7 @@ -syft.node.worker\_settings +syft.server.worker\_settings ========================== -.. automodule:: syft.node.worker_settings +.. automodule:: syft.server.worker_settings diff --git a/docs/source/api_reference/syft.rst b/docs/source/api_reference/syft.rst index d10b471583a..0e930b08dce 100644 --- a/docs/source/api_reference/syft.rst +++ b/docs/source/api_reference/syft.rst @@ -15,7 +15,7 @@ Subpackages syft.capnp syft.client syft.external - syft.node + syft.server syft.serde syft.service syft.store @@ -25,10 +25,10 @@ Subpackages Submodules ---------- -syft.abstract\_node module +syft.abstract\_server module -------------------------- -.. automodule:: syft.abstract_node +.. automodule:: syft.abstract_server :members: :undoc-members: :show-inheritance: diff --git a/docs/source/api_reference/syft.service.action.action_object.rst b/docs/source/api_reference/syft.service.action.action_object.rst index 014e584d460..89424403841 100644 --- a/docs/source/api_reference/syft.service.action.action_object.rst +++ b/docs/source/api_reference/syft.service.action.action_object.rst @@ -18,7 +18,7 @@ syft.service.action.action\_object has_action_data_empty is_action_data_empty make_action_side_effect - propagate_node_uid + propagate_server_uid send_action_side_effect diff --git a/docs/source/api_reference/syft.service.context.rst b/docs/source/api_reference/syft.service.context.rst index 276c578330b..990de2487d1 100644 --- a/docs/source/api_reference/syft.service.context.rst +++ b/docs/source/api_reference/syft.service.context.rst @@ -19,7 +19,7 @@ syft.service.context AuthedServiceContext ChangeContext - NodeServiceContext + ServerServiceContext UnauthedServiceContext diff --git a/docs/source/api_reference/syft.service.policy.policy.rst b/docs/source/api_reference/syft.service.policy.policy.rst index 74b52aa14cb..4e6cb3820dc 100644 --- a/docs/source/api_reference/syft.service.policy.policy.rst +++ b/docs/source/api_reference/syft.service.policy.policy.rst @@ -29,7 +29,7 @@ syft.service.policy.policy init_policy load_policy_code new_getfile - partition_by_node + partition_by_server process_class_code retrieve_from_db submit_policy_code_to_user_code diff --git a/docs/source/api_reference/syft.types.rst b/docs/source/api_reference/syft.types.rst index 67c05d63fd2..646214ca364 100644 --- a/docs/source/api_reference/syft.types.rst +++ b/docs/source/api_reference/syft.types.rst @@ -29,7 +29,7 @@ syft.types.base syft.types.datetime - syft.types.grid_url + syft.types.server_url syft.types.syft_metaclass syft.types.syft_object syft.types.transforms diff --git a/docs/source/api_reference/syft.types.grid_url.rst b/docs/source/api_reference/syft.types.server_url.rst similarity index 62% rename from 
docs/source/api_reference/syft.types.grid_url.rst rename to docs/source/api_reference/syft.types.server_url.rst index 123f9f34af6..c9ad2ab0625 100644 --- a/docs/source/api_reference/syft.types.grid_url.rst +++ b/docs/source/api_reference/syft.types.server_url.rst @@ -1,7 +1,7 @@ -syft.types.grid\_url +syft.types.server\_url ==================== -.. automodule:: syft.types.grid_url +.. automodule:: syft.types.server_url @@ -17,7 +17,7 @@ syft.types.grid\_url .. autosummary:: - GridURL + ServerURL diff --git a/docs/source/api_reference/syft.types.transforms.rst b/docs/source/api_reference/syft.types.transforms.rst index 374052d54f7..ef575c10e7b 100644 --- a/docs/source/api_reference/syft.types.transforms.rst +++ b/docs/source/api_reference/syft.types.transforms.rst @@ -14,7 +14,7 @@ syft.types.transforms .. autosummary:: add_credentials_for_key - add_node_uid_for_key + add_server_uid_for_key convert_types drop generate_id diff --git a/docs/source/deployment/glossary.rst b/docs/source/deployment/glossary.rst index a257c3ca4f8..6e5b2bf95e0 100644 --- a/docs/source/deployment/glossary.rst +++ b/docs/source/deployment/glossary.rst @@ -12,20 +12,20 @@ General terms Data Consortium ~~~~~~~~~~~~~~~~~~~~~ -A legal agreement under which multiple data owners delegate legal authority (IRB authority) to a central party, such that a data scientist need only enter into legal contract with that central party in order to perform analysis across all relevant participating domains in the ``data consortium``. +A legal agreement under which multiple data owners delegate legal authority (IRB authority) to a central party, such that a data scientist need only enter into legal contract with that central party in order to perform analysis across all relevant participating datasites in the ``data consortium``. Differential Privacy ~~~~~~~~~~~~~~~~~~~~~ While the textbook definition can be found here_, within the context of remote data science, ``differential privacy`` is a set of algorithms which empower a data owner to limit the probability that a data scientist will be able to use their statistical results to reverse engineer the data owner's def. of too much information about the underlying data that generated those results. In a nutshell, its aim is to prevent a Data Scientist from identifying any individual from the dataset through reverse-engineering. -Domain Node +Datasite Server ~~~~~~~~~~~~~~~~~~~~~ A ``computer system`` (or collection of computer systems) which manages the remote study of a data owner's data by a data scientist. It is responsible for allowing the `Data Owner` to manage the data, as well as incoming ``requests`` from data scientists and for gatekeeping the data scientist's access to data, compute, and experimental results stored within the data owner's compute infrastructure. -Network Node +Gateway Server ~~~~~~~~~~~~~~~~~~~~~ -A server which exists outside of any data owner's institution, providing services to the network of data owners and data scientists such as dataset search and bulk project approval (simultaneous legal/technical approval to participate in a project across groups of domains and data scientists at a time). A Network acts as a bridge between between its members and subscribers. The members are ``Domains`` while subscribers are the ``end users`` (e.g. Data Scientist) who explore and perform analysis on the datasets hosted by the members. -A network is used to provide access to a collection of domains at once i.e. 
if a user agrees to a ``Network Agreement``, then they automatically agree to the conditions to the Domains enlisted in that Network. +A server which exists outside of any data owner's institution, providing services to the network of data owners and data scientists such as dataset search and bulk project approval (simultaneous legal/technical approval to participate in a project across groups of datasites and data scientists at a time). A Network acts as a bridge between between its members and subscribers. The members are ``Datasites`` while subscribers are the ``end users`` (e.g. Data Scientist) who explore and perform analysis on the datasets hosted by the members. +A network is used to provide access to a collection of datasites at once i.e. if a user agrees to a ``Network Agreement``, then they automatically agree to the conditions to the Datasites enlisted in that Network. Privacy Budget ~~~~~~~~~~~~~~~~~~~~~ @@ -39,9 +39,9 @@ PySyft ~~~~~~~~~~~~~~~~~~~~~ An open-source platform that enables remote data science experiments by combining ``federated learning`` and ``differentialy privacy`` techniques. -PyGrid +Syft Server ~~~~~~~~~~~~~~~~~~~~~ -``PyGrid`` is a ``peer-to-peer network`` of data owners and data scientists who can collectively train AI models using ``PySyft``. ``PyGrid`` is also the central server for conducting both model-centric and data-centric ``federated learning``. You may control PyGrid via our user-interface, ``PyGrid Admin``. +``Syft Server`` is also the central server for conducting both model-centric and data-centric ``federated learning``. You may control Syft Server via our user-interface, ``Syft UI``. Remote Data Science ~~~~~~~~~~~~~~~~~~~~~ @@ -54,25 +54,25 @@ Data Owner ~~~~~~~~~~~~~~~~~~~~~ Within the field of remote data science, a data owner is someone who has a (digital) dataset which they would like to make available for study by an outside party whom they may or may not fully trust to have good intentions. -Domain Owner +Datasite Owner ~~~~~~~~~~~~~~~~~~~~~ -A user of ``PyGrid`` who has deployed a domain node. +A user of ``Syft Server`` who has deployed a datasite server. Network Owner ~~~~~~~~~~~~~~~~~~~~~ -Within the field of remote data science, a network owner provides technical and legal services helping to connect data scientists with data owners (domains) by helping them find each other (dataset search) and by helping them enter into bulk legal agreements through the hosting of a network-level data consortium to which such data owners and data scientist may apply. +Within the field of remote data science, a network owner provides technical and legal services helping to connect data scientists with data owners (datasites) by helping them find each other (dataset search) and by helping them enter into bulk legal agreements through the hosting of a network-level data consortium to which such data owners and data scientist may apply. Data Scientist ~~~~~~~~~~~~~~~~~~~~~ -Within the context of remote data science, a data scientist is a persona which desires to answer a specific question using data owned by someone else. This user is required to sign a ``Data Access Agreement`` if you have required one in the ``Domain Settings Configurations``. +Within the context of remote data science, a data scientist is a persona which desires to answer a specific question using data owned by someone else. This user is required to sign a ``Data Access Agreement`` if you have required one in the ``Datasite Settings Configurations``. 
-Domain Compliance Officer +Datasite Compliance Officer ~~~~~~~~~~~~~~~~~~~~~~~~~~~ All the personas in an institution that are in charge of making sure that the utilization of data at an institution occurs within legal boundaries and under their supervision and with their liability/responsibility. Network Compliance Officer ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -All the personas in an institution that are in charge of making sure that the access and utilization of data between their network's domains and members (data scientists) fall within the bounds outlined in the network's legal agreements. +All the personas in an institution that are in charge of making sure that the access and utilization of data between their network's datasites and members (data scientists) fall within the bounds outlined in the network's legal agreements. User roles ============ @@ -82,7 +82,7 @@ Default roles Data Scientist """""""""""""" -This role is for users who will be performing computations on your datasets. They may be users you know directly or those who found your domain through search and discovery. By default this user can see a ``list of your datasets`` and can request to get results. This user will also be required to sign a ``Data Access Agreement`` if you have required one in the ``Domain Settings Configurations``. +This role is for users who will be performing computations on your datasets. They may be users you know directly or those who found your datasite through search and discovery. By default this user can see a ``list of your datasets`` and can request to get results. This user will also be required to sign a ``Data Access Agreement`` if you have required one in the ``Datasite Settings Configurations``. Default permissions: @@ -90,7 +90,7 @@ Default permissions: Compliance Officer """""""""""""""""""" -This role is for users who will help you manage requests made on your node. They should be users you trust. They are not able to change ``Domain Settings`` or edit roles but they are by default able to accept or deny ``user requests`` on behalf of the ``domain node``. +This role is for users who will help you manage requests made on your server. They should be users you trust. They are not able to change ``Datasite Settings`` or edit roles but they are by default able to accept or deny ``user requests`` on behalf of the ``datasite server``. Default permissions: @@ -100,7 +100,7 @@ Default permissions: Admin """""" -This role is for users who will help you manage your node. This should be users you trust. The main difference between this ``user`` and a ``Compliance Officer`` is that this user by default not only can manage requests but can also edit ``Domain Settings.`` This is the highest level permission outside of an Owner. +This role is for users who will help you manage your server. This should be users you trust. The main difference between this ``user`` and a ``Compliance Officer`` is that this user by default not only can manage requests but can also edit ``Datasite Settings.`` This is the highest level permission outside of an Owner. Default permissions: @@ -108,7 +108,7 @@ Default permissions: Owner """""""" -There is only one Owner account assigned to any one domain node. The owner account is the highest level permission and is a requirement for deploying a domain node. If you should ever want to transfer ownership of your domain node to someone, please contact us at support@openmined.org. +There is only one Owner account assigned to any one datasite server. 
The owner account is the highest level permission and is a requirement for deploying a datasite server. If you should ever want to transfer ownership of your datasite server to someone, please contact us at support@openmined.org. Default permissions: @@ -116,16 +116,16 @@ Default permissions: * Cannot disable permissions by default -Domain membership roles +Datasite membership roles ~~~~~~~~~~~~~~~~~~~~~~~~~~ Guest """""""""""""" -The lowest level of ``network membership``, a guest domain is listed within a network node's registry and its datasets are searchable/discoverable by all users of the network, but the network has no legal relationship to the domain nor any authority to grant data scientists access to its data. As such, upon discovering a domain on the network, such a data scientist must apply directly to the domain for access by creating an account on such a domain and signing a legal agreement (a "data-sharing agreement") directly with its corresponding data owner. +The lowest level of ``network membership``, a guest datasite is listed within a network server's registry and its datasets are searchable/discoverable by all users of the network, but the network has no legal relationship to the datasite nor any authority to grant data scientists access to its data. As such, upon discovering a datasite on the network, such a data scientist must apply directly to the datasite for access by creating an account on such a datasite and signing a legal agreement (a "data-sharing agreement") directly with its corresponding data owner. Member """""""""""""" -The highest level of ``network membership``, a full domain member is greater than a guest member because, beyond its listing within a network node's registry, the domain has entered into a legal relationship with the network owner such that the network owner can unilaterally give its full data scientists access to data hosted by the domain. Note that this does not mean that the network can control access to all potential users of the ``registered domain``, because the domain's membership in the network is non-exclusive (domains can register in multiple networks and also accept direct data-scientist users on the side). A network node only has authority to give its own full data scientists access to any full domain within its registry. +The highest level of ``network membership``, a full datasite member is greater than a guest member because, beyond its listing within a network server's registry, the datasite has entered into a legal relationship with the network owner such that the network owner can unilaterally give its full data scientists access to data hosted by the datasite. Note that this does not mean that the network can control access to all potential users of the ``registered datasite``, because the datasite's membership in the network is non-exclusive (datasites can register in multiple networks and also accept direct data-scientist users on the side). A network server only has authority to give its own full data scientists access to any full datasite within its registry. .. |image0| image:: ../_static/deployment/image2.png :width: 95% diff --git a/docs/source/getting_started/index.rst b/docs/source/getting_started/index.rst index 4633130e472..e2264459c00 100644 --- a/docs/source/getting_started/index.rst +++ b/docs/source/getting_started/index.rst @@ -7,9 +7,9 @@ Getting Started .. toctree:: :maxdepth: 3 -Welcome to the domain deployment installation tutorials! +Welcome to the datasite deployment installation tutorials! 
This section of our documentation is designed to be the -simplest way to get you started deploying your data to a domain node +simplest way to get you started deploying your data to a datasite server on an OSX, Linux, or Windows machine and interacting with it as a data scientist using PySyft. @@ -20,7 +20,7 @@ as a data scientist using PySyft. `advanced deployment documentation `__. The purpose of these tutorials is to help you install everything -you need to run a Domain node from your personal machine (such +you need to run a Datasite server from your personal machine (such as if you're running through OpenMined `courses `__ or @@ -30,7 +30,7 @@ notebooks with PySyft installed, such as if you're pretending to be both Data Owner and Data Scientist as a part of a tutorial or course. -**We will be setting up the following dependencies before PySyft and PyGrid:** +**We will be setting up the following dependencies before PySyft and Syft Server:** * Python >=3.9 * pip diff --git a/docs/source/guides/index.rst b/docs/source/guides/index.rst index ff7da37d0b1..7f60ef9e966 100644 --- a/docs/source/guides/index.rst +++ b/docs/source/guides/index.rst @@ -34,12 +34,12 @@ an ``outside party`` they may or may not ``fully trust`` has good intentions. You Will Learn ⬇️ """""""""""""""""""" -| :doc:`Part 1: Deploying your own Domain Server ` -| :doc:`Part 2: Uploading Private Data to a Domain Server ` -| :doc:`Part 3: Creating User Accounts on your Domain Server ` +| :doc:`Part 1: Deploying your own Datasite Server ` +| :doc:`Part 2: Uploading Private Data to a Datasite Server ` +| :doc:`Part 3: Creating User Accounts on your Datasite Server ` | :doc:`Part 4: Joining a Network ` | :doc:`Part 5: Creating a Network <04-create-network>` -| :doc:`Part 6: Configuring Privacy Budget on your Domain Server <05-configure-pb>` +| :doc:`Part 6: Configuring Privacy Budget on your Datasite Server <05-configure-pb>` B. Getting Started with Data Scientist 👩🏽‍🔬 @@ -50,9 +50,9 @@ specific ``question`` using one or more data owners' ``datasets``. You Will Learn ⬇️ """""""""""""""""""" -| :doc:`Part 7: Connect to a Domain` -| :doc:`Part 8: Searching for Datasets on the Domain` -| :doc:`Part 9: Exploring a Dataset in the Domain` +| :doc:`Part 7: Connect to a Datasite` +| :doc:`Part 8: Searching for Datasets on the Datasite` +| :doc:`Part 9: Exploring a Dataset in the Datasite` | :doc:`Part 10: Training a Model` | :doc:`Part 11: Retrieving Secure Results <>` diff --git a/docs/source/index.rst b/docs/source/index.rst index 4f2244cd612..3386ad515cd 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -75,7 +75,7 @@ use and development of PySyft! 
syft.client syft.external - syft.node + syft.server syft.serde syft.service syft.store diff --git a/notebooks/admin/Custom API + Custom Worker.ipynb b/notebooks/admin/Custom API + Custom Worker.ipynb deleted file mode 100644 index d50c1f1b4f2..00000000000 --- a/notebooks/admin/Custom API + Custom Worker.ipynb +++ /dev/null @@ -1,523 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "0", - "metadata": {}, - "source": [ - "## Custom API + Custom Worker" - ] - }, - { - "cell_type": "markdown", - "id": "1", - "metadata": {}, - "source": [ - "#### Import dependencies" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "# syft absolute\n", - "import syft as sy\n", - "from syft.service.settings.settings import NodeSettingsUpdate\n", - "from syft.service.worker.worker_image import SyftWorkerImage" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3", - "metadata": {}, - "outputs": [], - "source": [ - "## remote mode\n", - "# os.environ[\"ORCHESTRA_DEPLOYMENT_TYPE\"] = \"remote\"\n", - "# os.environ[\"DEV_MODE\"] = \"True\"\n", - "domain_client = sy.login(\n", - " email=\"info@openmined.org\",\n", - " url=\"http://127.0.0.1\",\n", - " password=\"\",\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "# # python mode\n", - "# # !uv pip install google-cloud-bigquery db_dtypes\n", - "# node = sy.orchestra.launch(name=\"test-domain-1\", port=\"auto\", dev_mode=True, reset=True)\n", - "# domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [ - "domain_client.worker_pools" - ] - }, - { - "cell_type": "markdown", - "id": "6", - "metadata": {}, - "source": [ - "## Register a custom Image" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7", - "metadata": {}, - "outputs": [], - "source": [ - "registry = \"us-central1-docker.pkg.dev/reddit-testing-415005/syft-registry-us\"\n", - "backend_version = None\n", - "assert backend_version is not None\n", - "\n", - "custom_dockerfile_str = f\"\"\"\n", - "FROM {registry}/openmined/grid-backend:{backend_version}\n", - "\n", - "RUN uv pip install google-cloud-bigquery[all]==3.20.1 db-dtypes==1.2.0\n", - "\n", - "\"\"\".strip()\n", - "print(custom_dockerfile_str)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8", - "metadata": {}, - "outputs": [], - "source": [ - "docker_config = sy.DockerWorkerConfig(dockerfile=custom_dockerfile_str)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9", - "metadata": {}, - "outputs": [], - "source": [ - "submit_result = domain_client.api.services.worker_image.submit(\n", - " worker_config=docker_config\n", - ")\n", - "submit_result" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "10", - "metadata": {}, - "outputs": [], - "source": [ - "dockerfile_list = domain_client.images.get_all()\n", - "dockerfile_list" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "11", - "metadata": {}, - "outputs": [], - "source": [ - "workerimage = next(\n", - " (\n", - " image\n", - " for image in dockerfile_list\n", - " if not image.is_prebuilt and image.config.dockerfile == custom_dockerfile_str\n", - " ),\n", - " None,\n", - ")\n", - "\n", - "assert isinstance(workerimage, SyftWorkerImage), 
str(workerimage)\n", - "workerimage" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "12", - "metadata": {}, - "outputs": [], - "source": [ - "external_registry = registry\n", - "worker_docker_tag = \"openmined/bigquery:0.0.1\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "13", - "metadata": {}, - "outputs": [], - "source": [ - "registry_add_result = domain_client.api.services.image_registry.add(external_registry)\n", - "registry_add_result" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "14", - "metadata": {}, - "outputs": [], - "source": [ - "registries = domain_client.api.services.image_registry.get_all()\n", - "registry_uid = next((r.id for r in registries if r.url == external_registry), None)\n", - "registry_uid" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "15", - "metadata": {}, - "outputs": [], - "source": [ - "# build with registry_uid\n", - "docker_build_result = domain_client.api.services.worker_image.build(\n", - " image_uid=workerimage.id,\n", - " tag=worker_docker_tag,\n", - " registry_uid=registry_uid,\n", - ")\n", - "print(docker_build_result)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "16", - "metadata": {}, - "outputs": [], - "source": [ - "image_list = domain_client.images.get_all()\n", - "# we can also index with string using the repo_with_tag format\n", - "workerimage = next((image for image in image_list if image.id == workerimage.id), None)\n", - "assert workerimage.is_built is True\n", - "workerimage" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "17", - "metadata": {}, - "outputs": [], - "source": [ - "push_result = domain_client.api.services.worker_image.push(workerimage.id)\n", - "push_result" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "18", - "metadata": {}, - "outputs": [], - "source": [ - "worker_pool_name = \"bigquery-pool\"\n", - "domain_client.api.services.worker_pool.launch(\n", - " pool_name=worker_pool_name,\n", - " image_uid=workerimage.id,\n", - " num_workers=1,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "19", - "metadata": {}, - "outputs": [], - "source": [ - "domain_client.worker_pools[1]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "20", - "metadata": {}, - "outputs": [], - "source": [ - "new_default_worker_pool = NodeSettingsUpdate(default_worker_pool=worker_pool_name)\n", - "domain_client.settings.update(settings=new_default_worker_pool)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "21", - "metadata": {}, - "outputs": [], - "source": [ - "SERVICE_ACCOUNT = {}" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "22", - "metadata": {}, - "outputs": [], - "source": [ - "# debug manually\n", - "# from google.oauth2 import service_account\n", - "# from google.cloud import bigquery\n", - "# credentials = service_account.Credentials.from_service_account_info(SERVICE_ACCOUNT)\n", - "# scoped_credentials = credentials.with_scopes(['https://www.googleapis.com/auth/bigquery'])\n", - "# scoped_credentials = credentials.with_scopes(['https://www.googleapis.com/auth/cloud-platform'])\n", - "\n", - "# client = bigquery.Client(\n", - "# credentials=scoped_credentials,\n", - "# location=\"us-west1\",\n", - "# )\n", - "# sql=\"SELECT * FROM reddit-testing-415005.test_1gb.accounts limit 10\"\n", - "# rows = client.query_and_wait(\n", - "# sql\n", - "# )\n", - "# g = 
sy.ActionObject.from_obj(rows)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "23", - "metadata": {}, - "outputs": [], - "source": [ - "@sy.api_endpoint_method(\n", - " settings={\"credentials\": SERVICE_ACCOUNT, \"project_id\": \"reddit-testing-415005\"}\n", - ")\n", - "def public_function(context, sql: str) -> str:\n", - " # third party\n", - " from google.cloud import bigquery\n", - " from google.oauth2 import service_account\n", - "\n", - " credentials = service_account.Credentials.from_service_account_info(\n", - " context.settings[\"credentials\"]\n", - " )\n", - " scoped_credentials = credentials.with_scopes(\n", - " [\"https://www.googleapis.com/auth/bigquery\"]\n", - " )\n", - " scoped_credentials = credentials.with_scopes(\n", - " [\"https://www.googleapis.com/auth/cloud-platform\"]\n", - " )\n", - "\n", - " client = bigquery.Client(\n", - " credentials=scoped_credentials,\n", - " location=\"us-west1\",\n", - " )\n", - "\n", - " rows = client.query_and_wait(\n", - " sql,\n", - " project=context.settings[\"project_id\"],\n", - " )\n", - "\n", - " return rows\n", - "\n", - "\n", - "@sy.api_endpoint_method(\n", - " settings={\"credentials\": SERVICE_ACCOUNT, \"project_id\": \"reddit-testing-415005\"}\n", - ")\n", - "def private_function(context, sql: str) -> str:\n", - " # third party\n", - " from google.cloud import bigquery\n", - " from google.oauth2 import service_account\n", - "\n", - " credentials = service_account.Credentials.from_service_account_info(\n", - " context.settings[\"credentials\"]\n", - " )\n", - " scoped_credentials = credentials.with_scopes(\n", - " [\"https://www.googleapis.com/auth/bigquery\"]\n", - " )\n", - " scoped_credentials = credentials.with_scopes(\n", - " [\"https://www.googleapis.com/auth/cloud-platform\"]\n", - " )\n", - "\n", - " client = bigquery.Client(\n", - " credentials=scoped_credentials,\n", - " location=\"us-west1\",\n", - " )\n", - "\n", - " rows = client.query_and_wait(\n", - " sql,\n", - " project=context.settings[\"project_id\"],\n", - " )\n", - "\n", - " return rows" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "24", - "metadata": {}, - "outputs": [], - "source": [ - "new_endpoint = sy.TwinAPIEndpoint(\n", - " path=\"bigquery.query\",\n", - " mock_function=public_function,\n", - " private_function=private_function,\n", - " description=\"Lore ipsulum ...\",\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "25", - "metadata": {}, - "outputs": [], - "source": [ - "response = domain_client.api.services.api.delete(endpoint_path=\"bigquery.query\")\n", - "response" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "26", - "metadata": {}, - "outputs": [], - "source": [ - "response = domain_client.api.services.api.add(endpoint=new_endpoint)\n", - "response" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "27", - "metadata": {}, - "outputs": [], - "source": [ - "domain_client.refresh()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "28", - "metadata": {}, - "outputs": [], - "source": [ - "@sy.syft_function_single_use(\n", - " endpoint=domain_client.api.services.bigquery.query,\n", - " worker_pool_name=worker_pool_name,\n", - ")\n", - "def job_function(endpoint):\n", - " result = endpoint(\n", - " sql=\"SELECT * FROM reddit-testing-415005.test_1gb.accounts limit 10\"\n", - " )\n", - " return result" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "29", - "metadata": {}, - 
"outputs": [], - "source": [ - "new_project = sy.Project(\n", - " name=\"My Cool UN Project\",\n", - " description=\"Hi, I want to calculate the trade volume in million's with my cool code.\",\n", - " members=[domain_client],\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "30", - "metadata": {}, - "outputs": [], - "source": [ - "result = new_project.create_code_request(job_function, domain_client)\n", - "domain_client.requests[-1].approve()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "31", - "metadata": {}, - "outputs": [], - "source": [ - "domain_client.settings.get().default_worker_pool" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "32", - "metadata": {}, - "outputs": [], - "source": [ - "job = domain_client.code.job_function(\n", - " endpoint=domain_client.api.services.bigquery.query, blocking=False\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "33", - "metadata": {}, - "outputs": [], - "source": [ - "job" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "34", - "metadata": {}, - "outputs": [], - "source": [ - "domain_client.jobs" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/api/0.8/00-load-data.ipynb b/notebooks/api/0.8/00-load-data.ipynb index ad98a5ac361..5370bd2e595 100644 --- a/notebooks/api/0.8/00-load-data.ipynb +++ b/notebooks/api/0.8/00-load-data.ipynb @@ -4,7 +4,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Loading data into Syft Domain Server as a Data Owner\n", + "# Loading data into Syft Datasite Server as a Data Owner\n", "\n", "Welcome to Syft! 
This tutorial consists of 4 Jupyter notebooks that covers the basics of Syft which includes\n", "* [Uploading a private dataset as a Data Owner](./00-load-data.ipynb)\n", @@ -55,7 +55,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Launch a Syft Domain Server" + "### Launch a Syft Datasite Server" ] }, { @@ -66,8 +66,10 @@ }, "outputs": [], "source": [ - "# Launch a fresh domain server named \"test-domain-1\" in dev mode on the local machine\n", - "node = sy.orchestra.launch(name=\"test-domain-1\", port=\"auto\", dev_mode=True, reset=True)" + "# Launch a fresh datasite server named \"test-datasite-1\" in dev mode on the local machine\n", + "server = sy.orchestra.launch(\n", + " name=\"test-datasite-1\", port=\"auto\", dev_mode=True, reset=True\n", + ")" ] }, { @@ -78,8 +80,17 @@ }, "outputs": [], "source": [ - "# log into the node with default root credentials\n", - "domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "# log into the server with default root credentials\n", + "datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "datasite_client" ] }, { @@ -91,7 +102,7 @@ "outputs": [], "source": [ "# List the available API\n", - "domain_client.api" + "datasite_client.api" ] }, { @@ -114,7 +125,7 @@ "outputs": [], "source": [ "# Check for existing Data Subjects\n", - "data_subjects = domain_client.data_subject_registry.get_all()" + "data_subjects = datasite_client.data_subject_registry.get_all()" ] }, { @@ -206,7 +217,7 @@ "outputs": [], "source": [ "# Adds the data subject and all its members to the registry\n", - "response = domain_client.data_subject_registry.add_data_subject(country)\n", + "response = datasite_client.data_subject_registry.add_data_subject(country)\n", "response" ] }, @@ -230,7 +241,7 @@ "outputs": [], "source": [ "# Lets look at the data subjects added to the data\n", - "data_subjects = domain_client.data_subject_registry.get_all()\n", + "data_subjects = datasite_client.data_subject_registry.get_all()\n", "data_subjects" ] }, @@ -375,7 +386,7 @@ "dataset.add_contributor(\n", " name=\"Andrew Trask\",\n", " email=\"andrew@openmined.org\",\n", - " note=\"Andrew runs this domain and prepared the dataset metadata.\",\n", + " note=\"Andrew runs this datasite and prepared the dataset metadata.\",\n", ")\n", "\n", "dataset.add_contributor(\n", @@ -439,7 +450,7 @@ "ctf.add_contributor(\n", " name=\"Andrew Trask\",\n", " email=\"andrew@openmined.org\",\n", - " note=\"Andrew runs this domain and prepared the asset.\",\n", + " note=\"Andrew runs this datasite and prepared the asset.\",\n", ")" ] }, @@ -539,7 +550,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Upload Syft Dataset to Domain Server" + "### Upload Syft Dataset to Datasite Server" ] }, { @@ -550,7 +561,7 @@ }, "outputs": [], "source": [ - "upload_res = domain_client.upload_dataset(dataset)\n", + "upload_res = datasite_client.upload_dataset(dataset)\n", "upload_res" ] }, @@ -569,8 +580,8 @@ "metadata": {}, "outputs": [], "source": [ - "# We can list all the datasets on the Domain Server by invoking the following\n", - "datasets = domain_client.datasets.get_all()\n", + "# We can list all the datasets on the Datasite Server by invoking the following\n", + "datasets = datasite_client.datasets.get_all()\n", "datasets" ] }, @@ -604,7 +615,7 @@ } }, "source": [ - "### Reading the Syft Dataset from Domain 
Server\n", + "### Reading the Syft Dataset from Datasite Server\n", "\n", "Following the logical hierarchy of `Dataset`, `Asset`, and its variant, we can read the data as follows" ] @@ -618,7 +629,7 @@ "outputs": [], "source": [ "# Reading the mock dataset\n", - "mock = domain_client.datasets[0].assets[0].mock" + "mock = datasite_client.datasets[0].assets[0].mock" ] }, { @@ -642,7 +653,7 @@ "source": [ "# Reading the real dataset\n", "# NOTE: Private data can be accessed by the Data Owners, but NOT the Data Scientists\n", - "real = domain_client.datasets[0].assets[0].data" + "real = datasite_client.datasets[0].assets[0].data" ] }, { @@ -660,12 +671,12 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Create a new Data Scientist account on the Domain Server\n", + "### Create a new Data Scientist account on the Datasite Server\n", "\n", "Signup is disabled by default.\n", - "An Admin/DO can enable it by `domain_client.settings.allow_guest_signup(enable=True)`\n", + "An Admin/DO can enable it by `datasite_client.settings.allow_guest_signup(enable=True)`\n", "\n", - "Refer to notebook [07-domain-register-control-flow](./07-domain-register-control-flow.ipynb) for more information." + "Refer to notebook [07-datasite-register-control-flow](./07-datasite-register-control-flow.ipynb) for more information." ] }, { @@ -674,7 +685,7 @@ "metadata": {}, "outputs": [], "source": [ - "domain_client.register(\n", + "datasite_client.register(\n", " name=\"Jane Doe\",\n", " email=\"jane@caltech.edu\",\n", " password=\"abc123\",\n", @@ -692,9 +703,9 @@ }, "outputs": [], "source": [ - "# Cleanup local domain server\n", - "if node.node_type.value == \"python\":\n", - " node.land()" + "# Cleanup local datasite server\n", + "if server.server_type.value == \"python\":\n", + " server.land()" ] }, { diff --git a/notebooks/api/0.8/01-submit-code.ipynb b/notebooks/api/0.8/01-submit-code.ipynb index 3d360c88b18..74bb381b81d 100644 --- a/notebooks/api/0.8/01-submit-code.ipynb +++ b/notebooks/api/0.8/01-submit-code.ipynb @@ -40,7 +40,7 @@ "\n", "# syft absolute\n", "import syft as sy\n", - "from syft.client.api import NodeIdentity\n", + "from syft.client.api import ServerIdentity\n", "from syft.service.request.request import RequestStatus\n", "\n", "sy.requires(SYFT_VERSION)" @@ -50,7 +50,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Launch a Syft Domain Server" + "### Launch a Syft Datasite Server" ] }, { @@ -61,15 +61,15 @@ }, "outputs": [], "source": [ - "# Launch and connect to test-domain-1 server we setup in the previous notebook\n", - "node = sy.orchestra.launch(name=\"test-domain-1\", port=\"auto\", dev_mode=True)" + "# Launch and connect to test-datasite-1 server we setup in the previous notebook\n", + "server = sy.orchestra.launch(name=\"test-datasite-1\", port=\"auto\", dev_mode=True)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Every `node` exposes a \"guest\" client that allows some basic read operations on the node without creating an account." + "Every `server` exposes a \"guest\" client that allows some basic read operations on the server without creating an account." 
] }, { @@ -80,7 +80,7 @@ }, "outputs": [], "source": [ - "guest_domain_client = node.client" + "guest_datasite_client = server.client" ] }, { @@ -92,7 +92,7 @@ "outputs": [], "source": [ "# Print this to see the few commands that are available for the guest client\n", - "guest_domain_client" + "guest_datasite_client" ] }, { @@ -102,14 +102,14 @@ "outputs": [], "source": [ "# This will return the public credentials of the guest client\n", - "guest_credentials = guest_domain_client.credentials" + "guest_credentials = guest_datasite_client.credentials" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Login into the Domain with Data Scientist credentials that we created in [00-load-data.ipynb](./00-load-data.ipynb) notebook" + "Login into the Datasite with Data Scientist credentials that we created in [00-load-data.ipynb](./00-load-data.ipynb) notebook" ] }, { @@ -120,7 +120,7 @@ }, "outputs": [], "source": [ - "jane_client = guest_domain_client.login(email=\"jane@caltech.edu\", password=\"abc123\")\n", + "jane_client = guest_datasite_client.login(email=\"jane@caltech.edu\", password=\"abc123\")\n", "jane_client" ] }, @@ -139,7 +139,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Explore available Syft Datasets in the Domain Node" + "### Explore available Syft Datasets in the Datasite Server" ] }, { @@ -338,7 +338,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "You can validate your code against the mock data, before submitting it to the Domain Server" + "You can validate your code against the mock data, before submitting it to the Datasite Server" ] }, { @@ -385,11 +385,11 @@ "source": [ "# Tests\n", "assert len(sum_trade_value_mil.kwargs) == 1\n", - "node_identity = NodeIdentity.from_api(jane_client.api)\n", - "assert node_identity in sum_trade_value_mil.kwargs\n", - "assert \"trade_data\" in sum_trade_value_mil.kwargs[node_identity]\n", + "server_identity = ServerIdentity.from_api(jane_client.api)\n", + "assert server_identity in sum_trade_value_mil.kwargs\n", + "assert \"trade_data\" in sum_trade_value_mil.kwargs[server_identity]\n", "assert (\n", - " sum_trade_value_mil.input_policy_init_kwargs[node_identity][\"trade_data\"]\n", + " sum_trade_value_mil.input_policy_init_kwargs[server_identity][\"trade_data\"]\n", " == asset.action_id\n", ")" ] @@ -409,7 +409,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Submit your code to the Domain Server\n", + "### Submit your code to the Datasite Server\n", "\n", "We start by creating new Syft Project" ] @@ -481,7 +481,7 @@ }, "outputs": [], "source": [ - "# Once we start the project, it will submit the project along with the code request to the Domain Server\n", + "# Once we start the project, it will submit the project along with the code request to the Datasite Server\n", "project = new_project.send()\n", "project" ] @@ -569,10 +569,10 @@ }, "outputs": [], "source": [ - "# Cleanup local domain server\n", + "# Cleanup local datasite server\n", "\n", - "if node.node_type.value == \"python\":\n", - " node.land()" + "if server.server_type.value == \"python\":\n", + " server.land()" ] }, { diff --git a/notebooks/api/0.8/02-review-code-and-approve.ipynb b/notebooks/api/0.8/02-review-code-and-approve.ipynb index cd84910030b..0d1f81dc9ef 100644 --- a/notebooks/api/0.8/02-review-code-and-approve.ipynb +++ b/notebooks/api/0.8/02-review-code-and-approve.ipynb @@ -45,7 +45,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Login to Syft Domain Server" + "### Login to Syft Datasite 
Server" ] }, { @@ -56,8 +56,8 @@ }, "outputs": [], "source": [ - "# Launch and connect to test-domain-1 server we setup in the previous notebook\n", - "node = sy.orchestra.launch(name=\"test-domain-1\", port=\"auto\", dev_mode=True)" + "# Launch and connect to test-datasite-1 server we setup in the previous notebook\n", + "server = sy.orchestra.launch(name=\"test-datasite-1\", port=\"auto\", dev_mode=True)" ] }, { @@ -68,17 +68,17 @@ }, "outputs": [], "source": [ - "# Log into the node with default root credentials\n", - "domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "# Log into the server with default root credentials\n", + "datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### Selecting Project in the Syft Domain Server\n", + "### Selecting Project in the Syft Datasite Server\n", "\n", - "Let's see all the projects that are created by Data Scientists in this Domain Server" + "Let's see all the projects that are created by Data Scientists in this Datasite Server" ] }, { @@ -89,7 +89,7 @@ }, "outputs": [], "source": [ - "domain_client.projects" + "datasite_client.projects" ] }, { @@ -101,7 +101,7 @@ "outputs": [], "source": [ "# Select the project you want to work with\n", - "project = domain_client.projects[0]\n", + "project = datasite_client.projects[0]\n", "project" ] }, @@ -471,10 +471,10 @@ }, "outputs": [], "source": [ - "# Cleanup local domain server\n", + "# Cleanup local datasite server\n", "\n", - "if node.node_type.value == \"python\":\n", - " node.land()" + "if server.server_type.value == \"python\":\n", + " server.land()" ] }, { diff --git a/notebooks/api/0.8/03-data-scientist-download-result.ipynb b/notebooks/api/0.8/03-data-scientist-download-result.ipynb index 81ce4f783fc..47b6cfe001a 100644 --- a/notebooks/api/0.8/03-data-scientist-download-result.ipynb +++ b/notebooks/api/0.8/03-data-scientist-download-result.ipynb @@ -43,7 +43,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Login to Syft Domain Server" + "### Login to Syft Datasite Server" ] }, { @@ -54,7 +54,7 @@ }, "outputs": [], "source": [ - "node = sy.orchestra.launch(name=\"test-domain-1\", dev_mode=True)" + "server = sy.orchestra.launch(name=\"test-datasite-1\", dev_mode=True)" ] }, { @@ -65,7 +65,7 @@ }, "outputs": [], "source": [ - "domain_client = node.login(email=\"jane@caltech.edu\", password=\"abc123\")" + "datasite_client = server.login(email=\"jane@caltech.edu\", password=\"abc123\")" ] }, { @@ -86,7 +86,7 @@ "outputs": [], "source": [ "# Get the canada_trade_flow asset from the Canada Trade dataset\n", - "asset = domain_client.datasets[0].assets[0]\n", + "asset = datasite_client.datasets[0].assets[0]\n", "asset" ] }, @@ -103,7 +103,7 @@ "metadata": {}, "outputs": [], "source": [ - "domain_client.code.sum_trade_value_mil" + "datasite_client.code.sum_trade_value_mil" ] }, { @@ -121,7 +121,7 @@ }, "outputs": [], "source": [ - "result_pointer = domain_client.code.sum_trade_value_mil(trade_data=asset)\n", + "result_pointer = datasite_client.code.sum_trade_value_mil(trade_data=asset)\n", "result_pointer" ] }, @@ -161,7 +161,7 @@ }, "outputs": [], "source": [ - "ops = domain_client.code[-1].output_policy\n", + "ops = datasite_client.code[-1].output_policy\n", "ops" ] }, @@ -171,7 +171,7 @@ "metadata": {}, "outputs": [], "source": [ - "domain_client.code" + "datasite_client.code" ] }, { @@ -201,10 +201,10 @@ }, "outputs": [], "source": [ - "# Cleanup local 
domain\n", + "# Cleanup local datasite\n", "\n", - "if node.node_type.value == \"python\":\n", - " node.land()" + "if server.server_type.value == \"python\":\n", + " server.land()" ] }, { diff --git a/notebooks/api/0.8/04-pytorch-example.ipynb b/notebooks/api/0.8/04-pytorch-example.ipynb index 5cbc89ecaea..dcfc34acbc2 100644 --- a/notebooks/api/0.8/04-pytorch-example.ipynb +++ b/notebooks/api/0.8/04-pytorch-example.ipynb @@ -43,7 +43,7 @@ }, "outputs": [], "source": [ - "node = sy.orchestra.launch(name=\"test-domain-1\", dev_mode=True, reset=True)" + "server = sy.orchestra.launch(name=\"test-datasite-1\", dev_mode=True, reset=True)" ] }, { @@ -55,7 +55,7 @@ }, "outputs": [], "source": [ - "domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -128,8 +128,8 @@ }, "outputs": [], "source": [ - "train_domain_obj = train.send(domain_client)\n", - "type(train_domain_obj)" + "train_datasite_obj = train.send(datasite_client)\n", + "type(train_datasite_obj)" ] }, { @@ -139,7 +139,7 @@ "metadata": {}, "outputs": [], "source": [ - "train_domain_obj" + "train_datasite_obj" ] }, { @@ -149,7 +149,7 @@ "metadata": {}, "outputs": [], "source": [ - "assert torch.round(train_domain_obj.syft_action_data.sum()) == 1557" + "assert torch.round(train_datasite_obj.syft_action_data.sum()) == 1557" ] }, { @@ -233,7 +233,7 @@ }, "outputs": [], "source": [ - "weight_domain_obj = w.send(domain_client)" + "weight_datasite_obj = w.send(datasite_client)" ] }, { @@ -246,7 +246,9 @@ "outputs": [], "source": [ "@sy.syft_function(\n", - " input_policy=sy.ExactMatch(weights=weight_domain_obj.id, data=train_domain_obj.id),\n", + " input_policy=sy.ExactMatch(\n", + " weights=weight_datasite_obj.id, data=train_datasite_obj.id\n", + " ),\n", " output_policy=sy.SingleExecutionExactOutput(),\n", ")\n", "def train_mlp(weights, data):\n", @@ -292,7 +294,7 @@ }, "outputs": [], "source": [ - "pointer = train_mlp(weights=weight_domain_obj, data=train_domain_obj)\n", + "pointer = train_mlp(weights=weight_datasite_obj, data=train_datasite_obj)\n", "output = pointer.get()" ] }, @@ -315,7 +317,7 @@ }, "outputs": [], "source": [ - "request = domain_client.code.request_code_execution(train_mlp)\n", + "request = datasite_client.code.request_code_execution(train_mlp)\n", "request" ] }, @@ -340,8 +342,8 @@ }, "outputs": [], "source": [ - "domain_client._api = None\n", - "_ = domain_client.api" + "datasite_client._api = None\n", + "_ = datasite_client.api" ] }, { @@ -353,7 +355,7 @@ }, "outputs": [], "source": [ - "result_ptr = domain_client.code.train_mlp(weights=w.id, data=train.id)" + "result_ptr = datasite_client.code.train_mlp(weights=w.id, data=train.id)" ] }, { @@ -387,8 +389,8 @@ }, "outputs": [], "source": [ - "if node.node_type.value == \"python\":\n", - " node.land()" + "if server.server_type.value == \"python\":\n", + " server.land()" ] }, { diff --git a/notebooks/api/0.8/05-custom-policy.ipynb b/notebooks/api/0.8/05-custom-policy.ipynb index fee03727122..69b25ed410b 100644 --- a/notebooks/api/0.8/05-custom-policy.ipynb +++ b/notebooks/api/0.8/05-custom-policy.ipynb @@ -41,7 +41,9 @@ }, "outputs": [], "source": [ - "node = sy.orchestra.launch(name=\"test-domain-1\", port=\"auto\", dev_mode=True, reset=True)" + "server = sy.orchestra.launch(\n", + " name=\"test-datasite-1\", port=\"auto\", dev_mode=True, reset=True\n", + ")" ] }, { @@ -53,7 +55,7 @@ }, "outputs": [], "source": [ - "domain_client = 
node.login(email=\"info@openmined.org\", password=\"changethis\")" + "datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -63,7 +65,7 @@ "metadata": {}, "outputs": [], "source": [ - "domain_client.register(\n", + "datasite_client.register(\n", " email=\"newuser@openmined.org\", name=\"John Doe\", password=\"pw\", password_verify=\"pw\"\n", ")" ] @@ -75,7 +77,7 @@ "metadata": {}, "outputs": [], "source": [ - "client_low_ds = node.login(email=\"newuser@openmined.org\", password=\"pw\")" + "client_low_ds = server.login(email=\"newuser@openmined.org\", password=\"pw\")" ] }, { @@ -231,7 +233,7 @@ "metadata": {}, "outputs": [], "source": [ - "x_pointer = x_pointer.send(domain_client)" + "x_pointer = x_pointer.send(datasite_client)" ] }, { @@ -247,7 +249,7 @@ "\n", "# syft absolute\n", "from syft.client.api import AuthedServiceContext\n", - "from syft.client.api import NodeIdentity\n", + "from syft.client.api import ServerIdentity\n", "\n", "\n", "class CustomExactMatch(sy.CustomInputPolicy):\n", @@ -272,21 +274,21 @@ "\n", " def retrieve_from_db(self, code_item_id, allowed_inputs, context):\n", " # syft absolute\n", - " from syft import NodeType\n", + " from syft import ServerType\n", " from syft.service.action.action_object import TwinMode\n", "\n", - " action_service = context.node.get_service(\"actionservice\")\n", + " action_service = context.server.get_service(\"actionservice\")\n", " code_inputs = {}\n", "\n", - " # When we are retrieving the code from the database, we need to use the node's\n", + " # When we are retrieving the code from the database, we need to use the server's\n", " # verify key as the credentials. This is because when we approve the code, we\n", " # we allow the private data to be used only for this specific code.\n", " # but we are not modifying the permissions of the private data\n", "\n", " root_context = AuthedServiceContext(\n", - " node=context.node, credentials=context.node.verify_key\n", + " server=context.server, credentials=context.server.verify_key\n", " )\n", - " if context.node.node_type == NodeType.DOMAIN:\n", + " if context.server.server_type == ServerType.DATASITE:\n", " for var_name, arg_id in allowed_inputs.items():\n", " kwarg_value = action_service._get(\n", " context=root_context,\n", @@ -299,7 +301,7 @@ " code_inputs[var_name] = kwarg_value.ok()\n", " else:\n", " raise Exception(\n", - " f\"Invalid Node Type for Code Submission:{context.node.node_type}\"\n", + " f\"Invalid Server Type for Code Submission:{context.server.server_type}\"\n", " )\n", " return Ok(code_inputs)\n", "\n", @@ -310,19 +312,19 @@ " context,\n", " ):\n", " # syft absolute\n", - " from syft import NodeType\n", + " from syft import ServerType\n", " from syft import UID\n", "\n", - " if context.node.node_type == NodeType.DOMAIN:\n", - " node_identity = NodeIdentity(\n", - " node_name=context.node.name,\n", - " node_id=context.node.id,\n", - " verify_key=context.node.signing_key.verify_key,\n", + " if context.server.server_type == ServerType.DATASITE:\n", + " server_identity = ServerIdentity(\n", + " server_name=context.server.name,\n", + " server_id=context.server.id,\n", + " verify_key=context.server.signing_key.verify_key,\n", " )\n", - " allowed_inputs = allowed_inputs.get(node_identity, {})\n", + " allowed_inputs = allowed_inputs.get(server_identity, {})\n", " else:\n", " raise Exception(\n", - " f\"Invalid Node Type for Code Submission:{context.node.node_type}\"\n", + " f\"Invalid Server Type for Code 
Submission:{context.server.server_type}\"\n", " )\n", " filtered_kwargs = {}\n", " for key in allowed_inputs.keys():\n", @@ -379,20 +381,20 @@ " context,\n", "):\n", " # syft absolute\n", - " from syft import NodeType\n", + " from syft import ServerType\n", " from syft import UID\n", - " from syft.client.api import NodeIdentity\n", + " from syft.client.api import ServerIdentity\n", "\n", - " if context.node.node_type == NodeType.DOMAIN:\n", - " node_identity = NodeIdentity(\n", - " node_name=context.node.name,\n", - " node_id=context.node.id,\n", - " verify_key=context.node.signing_key.verify_key,\n", + " if context.server.server_type == ServerType.DATASITE:\n", + " server_identity = ServerIdentity(\n", + " server_name=context.server.name,\n", + " server_id=context.server.id,\n", + " verify_key=context.server.signing_key.verify_key,\n", " )\n", - " allowed_inputs = allowed_inputs.get(node_identity, {})\n", + " allowed_inputs = allowed_inputs.get(server_identity, {})\n", " else:\n", " raise Exception(\n", - " f\"Invalid Node Type for Code Submission:{context.node.node_type}\"\n", + " f\"Invalid Server Type for Code Submission:{context.server.server_type}\"\n", " )\n", " filtered_kwargs = {}\n", " for key in allowed_inputs.keys():\n", @@ -469,7 +471,7 @@ "metadata": {}, "outputs": [], "source": [ - "for request in domain_client.requests:\n", + "for request in datasite_client.requests:\n", " if request.id == request_id:\n", " break" ] @@ -589,7 +591,7 @@ }, "outputs": [], "source": [ - "for code in domain_client.code.get_all():\n", + "for code in datasite_client.code.get_all():\n", " if code.service_func_name == \"func\":\n", " break\n", "print(code.output_policy.state)\n", @@ -605,8 +607,8 @@ }, "outputs": [], "source": [ - "if node.node_type.value == \"python\":\n", - " node.land()" + "if server.server_type.value == \"python\":\n", + " server.land()" ] } ], diff --git a/notebooks/api/0.8/06-multiple-code-requests.ipynb b/notebooks/api/0.8/06-multiple-code-requests.ipynb index a52b5c6b38f..85f4fe0b88d 100644 --- a/notebooks/api/0.8/06-multiple-code-requests.ipynb +++ b/notebooks/api/0.8/06-multiple-code-requests.ipynb @@ -41,7 +41,9 @@ }, "outputs": [], "source": [ - "node = sy.orchestra.launch(name=\"test-domain-1\", port=\"auto\", reset=True, dev_mode=True)" + "server = sy.orchestra.launch(\n", + " name=\"test-datasite-1\", port=\"auto\", reset=True, dev_mode=True\n", + ")" ] }, { @@ -53,7 +55,7 @@ }, "outputs": [], "source": [ - "root_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "root_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -142,7 +144,7 @@ }, "outputs": [], "source": [ - "ds_client = node.login(email=\"sheldon@caltech.edu\", password=\"abc123\")" + "ds_client = server.login(email=\"sheldon@caltech.edu\", password=\"abc123\")" ] }, { @@ -358,7 +360,7 @@ "metadata": {}, "outputs": [], "source": [ - "# The Domain Owner retrieves by name or uid for approval\n", + "# The Datasite Owner retrieves by name or uid for approval\n", "root_client_project = root_client.projects.get_by_uid(project.id)\n", "assert isinstance(root_client_project, sy.service.project.project.Project)" ] @@ -549,8 +551,8 @@ }, "outputs": [], "source": [ - "if node.deployment_type.value in [\"python\", \"single_container\"]:\n", - " node.land()" + "if server.deployment_type.value in [\"python\", \"single_container\"]:\n", + " server.land()" ] }, { diff --git a/notebooks/api/0.8/07-domain-register-control-flow.ipynb 
b/notebooks/api/0.8/07-datasite-register-control-flow.ipynb similarity index 86% rename from notebooks/api/0.8/07-domain-register-control-flow.ipynb rename to notebooks/api/0.8/07-datasite-register-control-flow.ipynb index 5bd493a47c9..3f19b01311a 100644 --- a/notebooks/api/0.8/07-domain-register-control-flow.ipynb +++ b/notebooks/api/0.8/07-datasite-register-control-flow.ipynb @@ -5,9 +5,9 @@ "id": "0", "metadata": {}, "source": [ - "# Registering Users in Syft Domain Server\n", + "# Registering Users in Syft Datasite Server\n", "\n", - "By default users are not allowed to create a new account on the Syft Domain Server. This notebook is a tutorial for Data Owners to enable guest signups on their deployments." + "By default users are not allowed to create a new account on the Syft Datasite Server. This notebook is a tutorial for Data Owners to enable guest signups on their deployments." ] }, { @@ -48,7 +48,7 @@ "id": "4", "metadata": {}, "source": [ - "### Launch a Syft Domain Server" + "### Launch a Syft Datasite Server" ] }, { @@ -58,7 +58,9 @@ "metadata": {}, "outputs": [], "source": [ - "node = sy.orchestra.launch(name=\"test-domain-1\", port=\"auto\", dev_mode=True, reset=True)" + "server = sy.orchestra.launch(\n", + " name=\"test-datasite-1\", port=\"auto\", dev_mode=True, reset=True\n", + ")" ] }, { @@ -68,8 +70,8 @@ "metadata": {}, "outputs": [], "source": [ - "# log into the node with default root credentials\n", - "root_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "# log into the server with default root credentials\n", + "root_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -87,7 +89,7 @@ "metadata": {}, "outputs": [], "source": [ - "# The assumed state of this test is a node with signup set to False\n", + "# The assumed state of this test is a server with signup set to False\n", "# however if the tox task has set it to True you need to overwrite the setting\n", "# before running the tests\n", "# root_client.settings.allow_guest_signup(enable=False)" @@ -118,7 +120,7 @@ "outputs": [], "source": [ "# Register a new user as a GUEST\n", - "response_2 = node.register(\n", + "response_2 = server.register(\n", " email=\"batman@gotham.com\",\n", " password=\"1rIzHAx6uQaP\",\n", " password_verify=\"1rIzHAx6uQaP\",\n", @@ -135,7 +137,7 @@ "outputs": [], "source": [ "# Register a new user as a GUEST\n", - "response_3 = node.register(\n", + "response_3 = server.register(\n", " email=\"robin@gotham.com\",\n", " password=\"5v1ei4OM2N4m\",\n", " password_verify=\"5v1ei4OM2N4m\",\n", @@ -172,7 +174,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Get the current settings of the node\n", + "# Get the current settings of the server\n", "root_client.settings.get()" ] }, @@ -195,7 +197,7 @@ "outputs": [], "source": [ "# Refresh the root client to fetch the updated settings\n", - "root_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "root_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -206,7 +208,7 @@ "outputs": [], "source": [ "# Register a new user as a GUEST\n", - "response_2 = node.register(\n", + "response_2 = server.register(\n", " email=\"batman@gotham.com\",\n", " password=\"1rIzHAx6uQaP\",\n", " password_verify=\"1rIzHAx6uQaP\",\n", @@ -223,7 +225,7 @@ "outputs": [], "source": [ "# Register a new user as a GUEST\n", - "response_3 = node.register(\n", + "response_3 = server.register(\n", " email=\"robin@gotham.com\",\n", " 
password=\"5v1ei4OM2N4m\",\n", " password_verify=\"5v1ei4OM2N4m\",\n", @@ -272,7 +274,7 @@ "outputs": [], "source": [ "# Refresh the root client to fetch the updated settings\n", - "root_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "root_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -283,7 +285,7 @@ "outputs": [], "source": [ "# Register a new user as a GUEST\n", - "response_2 = node.register(\n", + "response_2 = server.register(\n", " email=\"bane@gotham.com\",\n", " password=\"SKY5cC2zQPRP\",\n", " password_verify=\"SKY5cC2zQPRP\",\n", @@ -300,7 +302,7 @@ "outputs": [], "source": [ "# Register a new user as a GUEST\n", - "response_3 = node.register(\n", + "response_3 = server.register(\n", " email=\"riddler@gotham.com\",\n", " password=\"7eVGUuNDyH8P\",\n", " password_verify=\"7eVGUuNDyH8P\",\n", @@ -329,10 +331,10 @@ "metadata": {}, "outputs": [], "source": [ - "# Cleanup local domain server\n", + "# Cleanup local datasite server\n", "\n", - "if node.node_type.value == \"python\":\n", - " node.land()" + "if server.server_type.value == \"python\":\n", + " server.land()" ] }, { diff --git a/notebooks/api/0.8/08-code-version.ipynb b/notebooks/api/0.8/08-code-version.ipynb index 4168770ca88..b67ffb54f0d 100644 --- a/notebooks/api/0.8/08-code-version.ipynb +++ b/notebooks/api/0.8/08-code-version.ipynb @@ -41,7 +41,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Launch a Syft Domain Server" + "### Launch a Syft Datasite Server" ] }, { @@ -50,7 +50,7 @@ "metadata": {}, "outputs": [], "source": [ - "node = sy.orchestra.launch(name=\"test-domain-1\", port=\"auto\", dev_mode=True)" + "server = sy.orchestra.launch(name=\"test-datasite-1\", port=\"auto\", dev_mode=True)" ] }, { @@ -59,15 +59,15 @@ "metadata": {}, "outputs": [], "source": [ - "# log into the node with default root credentials\n", - "domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "# log into the server with default root credentials\n", + "datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### Create a new Data Scientist account on the Domain Server\n" + "### Create a new Data Scientist account on the Datasite Server\n" ] }, { @@ -76,7 +76,7 @@ "metadata": {}, "outputs": [], "source": [ - "domain_client.register(\n", + "datasite_client.register(\n", " name=\"Jane Doe\",\n", " email=\"jane@caltech.edu\",\n", " password=\"abc123\",\n", @@ -92,7 +92,7 @@ "metadata": {}, "outputs": [], "source": [ - "jane_client = node.login(email=\"jane@caltech.edu\", password=\"abc123\")" + "jane_client = server.login(email=\"jane@caltech.edu\", password=\"abc123\")" ] }, { @@ -214,7 +214,7 @@ "metadata": {}, "outputs": [], "source": [ - "admin_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "admin_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -268,9 +268,9 @@ "metadata": {}, "outputs": [], "source": [ - "# # Cleanup local domain server\n", - "if node.node_type.value == \"python\":\n", - " node.land()" + "# # Cleanup local datasite server\n", + "if server.server_type.value == \"python\":\n", + " server.land()" ] }, { diff --git a/notebooks/api/0.8/09-blob-storage.ipynb b/notebooks/api/0.8/09-blob-storage.ipynb index 93491499896..57c1837effd 100644 --- a/notebooks/api/0.8/09-blob-storage.ipynb +++ b/notebooks/api/0.8/09-blob-storage.ipynb @@ -33,8 +33,8 @@ 
"metadata": {}, "outputs": [], "source": [ - "node = sy.orchestra.launch(\n", - " name=\"test-domain-1\",\n", + "server = sy.orchestra.launch(\n", + " name=\"test-datasite-1\",\n", " dev_mode=True,\n", " reset=True,\n", " create_producer=True,\n", @@ -47,7 +47,7 @@ "metadata": {}, "outputs": [], "source": [ - "domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -89,7 +89,7 @@ "metadata": {}, "outputs": [], "source": [ - "data_ptr = domain_client.upload_files([a_file, b_file])" + "data_ptr = datasite_client.upload_files([a_file, b_file])" ] }, { @@ -127,7 +127,7 @@ "metadata": {}, "outputs": [], "source": [ - "lines_file_pptr = domain_client.upload_files(x_file)[0].syft_action_data" + "lines_file_pptr = datasite_client.upload_files(x_file)[0].syft_action_data" ] }, { @@ -169,7 +169,7 @@ "metadata": {}, "outputs": [], "source": [ - "domain_client.upload_dataset(ds)" + "datasite_client.upload_dataset(ds)" ] }, { @@ -219,7 +219,7 @@ "metadata": {}, "outputs": [], "source": [ - "single_data_ptr = domain_client.upload_files(a_file)\n", + "single_data_ptr = datasite_client.upload_files(a_file)\n", "single_data_ptr" ] }, @@ -237,14 +237,14 @@ "outputs": [], "source": [ "if False:\n", - " domain_client.upload_files(\"./path/to/folder\")" + " datasite_client.upload_files(\"./path/to/folder\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "#### Cleanup local domain server" + "#### Cleanup local datasite server" ] }, { @@ -253,8 +253,8 @@ "metadata": {}, "outputs": [], "source": [ - "if node.node_type.value == \"python\":\n", - " node.land()" + "if server.server_type.value == \"python\":\n", + " server.land()" ] }, { diff --git a/notebooks/api/0.8/10-container-images.ipynb b/notebooks/api/0.8/10-container-images.ipynb index 6ce97e012e1..ab3f6d0338b 100644 --- a/notebooks/api/0.8/10-container-images.ipynb +++ b/notebooks/api/0.8/10-container-images.ipynb @@ -101,8 +101,8 @@ "metadata": {}, "outputs": [], "source": [ - "domain = sy.orchestra.launch(\n", - " name=\"test-domain-1\",\n", + "datasite = sy.orchestra.launch(\n", + " name=\"test-datasite-1\",\n", " dev_mode=True,\n", " create_producer=True,\n", " reset=True,\n", @@ -117,7 +117,7 @@ "metadata": {}, "outputs": [], "source": [ - "domain_client = domain.login(email=\"info@openmined.org\", password=\"changethis\")" + "datasite_client = datasite.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -135,7 +135,7 @@ "metadata": {}, "outputs": [], "source": [ - "domain_client.worker_pools" + "datasite_client.worker_pools" ] }, { @@ -150,7 +150,8 @@ " if (bool(os.environ[\"DEV_MODE\"]) and running_as_container)\n", " else sy.__version__\n", ")\n", - "syft_base_worker_tag" + "# TODO: Remove this once the stable syft version is released\n", + "syft_base_worker_tag = \"0.8.7-beta.15\"" ] }, { @@ -169,7 +170,7 @@ "outputs": [], "source": [ "opendp_dockerfile_str = f\"\"\"\n", - "FROM openmined/grid-backend:{syft_base_worker_tag}\n", + "FROM openmined/syft-backend:{syft_base_worker_tag}\n", "\n", "RUN uv pip install opendp\n", "\n", @@ -227,7 +228,7 @@ "metadata": {}, "outputs": [], "source": [ - "submit_result = domain_client.api.services.worker_image.submit(\n", + "submit_result = datasite_client.api.services.worker_image.submit(\n", " worker_config=docker_config\n", ")" ] @@ -259,7 +260,7 @@ "metadata": {}, "outputs": [], "source": [ - "dockerfile_list = domain_client.images.get_all()\n", + 
"dockerfile_list = datasite_client.images.get_all()\n", "dockerfile_list" ] }, @@ -270,7 +271,7 @@ "metadata": {}, "outputs": [], "source": [ - "assert len(domain_client.images.get_all()) == 2" + "assert len(datasite_client.images.get_all()) == 2" ] }, { @@ -314,7 +315,7 @@ "metadata": {}, "outputs": [], "source": [ - "registry_add_result = domain_client.api.services.image_registry.add(\"localhost:5678\")\n", + "registry_add_result = datasite_client.api.services.image_registry.add(\"localhost:5678\")\n", "registry_add_result" ] }, @@ -335,7 +336,7 @@ "metadata": {}, "outputs": [], "source": [ - "images = domain_client.api.services.image_registry.get_all()\n", + "images = datasite_client.api.services.image_registry.get_all()\n", "assert len(images) == 1\n", "images" ] @@ -389,7 +390,7 @@ "source": [ "registry_uid = local_registry.id if running_as_container else local_registry.id\n", "\n", - "docker_build_result = domain_client.api.services.worker_image.build(\n", + "docker_build_result = datasite_client.api.services.worker_image.build(\n", " image_uid=workerimage.id,\n", " tag=docker_tag,\n", " registry_uid=registry_uid,\n", @@ -425,7 +426,7 @@ "metadata": {}, "outputs": [], "source": [ - "image_list = domain_client.images.get_all()\n", + "image_list = datasite_client.images.get_all()\n", "image_list" ] }, @@ -501,7 +502,7 @@ " local_registry_container.start()\n", " sleep(5)\n", "\n", - " push_result = domain_client.api.services.worker_image.push(workerimage.id)\n", + " push_result = datasite_client.api.services.worker_image.push(workerimage.id)\n", " assert isinstance(push_result, sy.SyftSuccess), str(push_result)" ] }, @@ -599,7 +600,7 @@ "outputs": [], "source": [ "worker_pool_name = \"opendp-pool\"\n", - "worker_pool_res = domain_client.api.services.worker_pool.launch(\n", + "worker_pool_res = datasite_client.api.services.worker_pool.launch(\n", " pool_name=worker_pool_name,\n", " image_uid=workerimage.id,\n", " num_workers=2,\n", @@ -638,7 +639,7 @@ "metadata": {}, "outputs": [], "source": [ - "worker_pool_list = domain_client.worker_pools\n", + "worker_pool_list = datasite_client.worker_pools\n", "worker_pool_list" ] }, @@ -649,7 +650,7 @@ "metadata": {}, "outputs": [], "source": [ - "assert len(domain_client.worker_pools.get_all()) == 2\n", + "assert len(datasite_client.worker_pools.get_all()) == 2\n", "worker_pool = None\n", "for pool in worker_pool_list:\n", " if pool.name == worker_pool_name:\n", @@ -667,7 +668,7 @@ "outputs": [], "source": [ "# We can filter pools based on the image id upon which the pools were built\n", - "domain_client.api.services.worker_pool.filter_by_image_id(image_uid=workerimage.id)" + "datasite_client.api.services.worker_pool.filter_by_image_id(image_uid=workerimage.id)" ] }, { @@ -706,7 +707,7 @@ "metadata": {}, "outputs": [], "source": [ - "raw_worker_logs = domain_client.api.services.worker.logs(\n", + "raw_worker_logs = datasite_client.api.services.worker.logs(\n", " uid=second_worker.id,\n", " raw=True,\n", ")\n", @@ -730,7 +731,7 @@ "metadata": {}, "outputs": [], "source": [ - "worker_logs = domain_client.api.services.worker.logs(\n", + "worker_logs = datasite_client.api.services.worker.logs(\n", " uid=second_worker.id,\n", ")\n", "worker_logs" @@ -761,7 +762,7 @@ "metadata": {}, "outputs": [], "source": [ - "worker_delete_res = domain_client.api.services.worker.delete(\n", + "worker_delete_res = datasite_client.api.services.worker.delete(\n", " uid=second_worker.id, force=True\n", ")" ] @@ -795,7 +796,7 @@ "source": [ "# Refetch the worker pool\n", 
"# Ensure that the deleted worker's id is not present\n", - "for pool in domain_client.api.services.worker_pool.get_all():\n", + "for pool in datasite_client.api.services.worker_pool.get_all():\n", " if pool.name == worker_pool_name:\n", " worker_pool = pool\n", "assert len(worker_pool.workers) == 1\n", @@ -831,7 +832,7 @@ "data = np.array([1, 2, 3])\n", "data_action_obj = sy.ActionObject.from_obj(data)\n", "\n", - "data_pointer = data_action_obj.send(domain_client)\n", + "data_pointer = data_action_obj.send(datasite_client)\n", "data_pointer" ] }, @@ -880,7 +881,7 @@ "metadata": {}, "outputs": [], "source": [ - "request = domain_client.code.request_code_execution(custom_worker_func)\n", + "request = datasite_client.code.request_code_execution(custom_worker_func)\n", "request" ] }, @@ -891,7 +892,7 @@ "metadata": {}, "outputs": [], "source": [ - "domain_client.requests[-1].approve(approve_nested=True)" + "datasite_client.requests[-1].approve(approve_nested=True)" ] }, { @@ -901,7 +902,7 @@ "metadata": {}, "outputs": [], "source": [ - "job = domain_client.code.custom_worker_func(x=data_pointer, blocking=False)\n", + "job = datasite_client.code.custom_worker_func(x=data_pointer, blocking=False)\n", "job" ] }, @@ -912,7 +913,7 @@ "metadata": {}, "outputs": [], "source": [ - "worker_pool = domain_client.worker_pools[worker_pool_name]\n", + "worker_pool = datasite_client.worker_pools[worker_pool_name]\n", "worker_pool" ] }, @@ -943,7 +944,7 @@ "metadata": {}, "outputs": [], "source": [ - "job = domain_client.jobs[-1]\n", + "job = datasite_client.jobs[-1]\n", "job" ] }, @@ -988,7 +989,7 @@ "source": [ "# Once the work is done by the worker, its state is returned to idle again.\n", "consuming_worker_is_now_idle = False\n", - "for worker in domain_client.worker_pools[worker_pool_name].workers:\n", + "for worker in datasite_client.worker_pools[worker_pool_name].workers:\n", " if worker.id == job.job_worker_id:\n", " consuming_worker_is_now_idle = worker.consumer_state.value.lower() == \"idle\"\n", "\n", @@ -1025,7 +1026,7 @@ "source": [ "# delete the remaining workers\n", "for worker in worker_pool.workers:\n", - " res = domain_client.api.services.worker.delete(\n", + " res = datasite_client.api.services.worker.delete(\n", " uid=worker.id,\n", " )\n", " assert isinstance(res, sy.SyftSuccess), str(res)" @@ -1038,7 +1039,7 @@ "metadata": {}, "outputs": [], "source": [ - "delete_res = domain_client.api.services.worker_image.remove(workerimage.id)\n", + "delete_res = datasite_client.api.services.worker_image.remove(workerimage.id)\n", "delete_res" ] }, @@ -1080,7 +1081,7 @@ "outputs": [], "source": [ "custom_dockerfile_str_2 = f\"\"\"\n", - "FROM openmined/grid-backend:{syft_base_worker_tag}\n", + "FROM openmined/syft-backend:{syft_base_worker_tag}\n", "\n", "RUN uv pip install opendp\n", "\"\"\".strip()\n", @@ -1095,7 +1096,7 @@ "metadata": {}, "outputs": [], "source": [ - "submit_result = domain_client.api.services.worker_image.submit(\n", + "submit_result = datasite_client.api.services.worker_image.submit(\n", " worker_config=docker_config_2\n", ")\n", "submit_result" @@ -1108,7 +1109,7 @@ "metadata": {}, "outputs": [], "source": [ - "domain_client.images" + "datasite_client.images" ] }, { @@ -1120,7 +1121,7 @@ "source": [ "# get the image that's not built\n", "workerimage_2 = None\n", - "for im in domain_client.images:\n", + "for im in datasite_client.images:\n", " if im.config == docker_config_2:\n", " workerimage_2 = im" ] @@ -1142,7 +1143,7 @@ "source": [ "docker_tag_2 = 
\"openmined/custom-worker-opendp:latest\"\n", "\n", - "docker_build_result = domain_client.api.services.worker_image.build(\n", + "docker_build_result = datasite_client.api.services.worker_image.build(\n", " image_uid=workerimage_2.id,\n", " tag=docker_tag_2,\n", " pull=pull,\n", @@ -1158,7 +1159,7 @@ "outputs": [], "source": [ "opendp_pool_name = \"second-opendp-pool\"\n", - "pool_create_request = domain_client.api.services.worker_pool.pool_creation_request(\n", + "pool_create_request = datasite_client.api.services.worker_pool.pool_creation_request(\n", " pool_name=opendp_pool_name, num_workers=2, image_uid=workerimage_2.id\n", ")\n", "pool_create_request" @@ -1203,7 +1204,7 @@ "metadata": {}, "outputs": [], "source": [ - "domain_client.worker_pools[opendp_pool_name]" + "datasite_client.worker_pools[opendp_pool_name]" ] }, { @@ -1213,8 +1214,8 @@ "metadata": {}, "outputs": [], "source": [ - "assert domain_client.worker_pools[opendp_pool_name]\n", - "assert len(domain_client.worker_pools[opendp_pool_name].workers) == 2" + "assert datasite_client.worker_pools[opendp_pool_name]\n", + "assert len(datasite_client.worker_pools[opendp_pool_name].workers) == 2" ] }, { @@ -1225,7 +1226,7 @@ "outputs": [], "source": [ "# default, opendp-pool, second-opendp-pool\n", - "assert len(domain_client.worker_pools.get_all()) == 3" + "assert len(datasite_client.worker_pools.get_all()) == 3" ] }, { @@ -1243,11 +1244,11 @@ "metadata": {}, "outputs": [], "source": [ - "for worker in domain_client.worker_pools[\"second-opendp-pool\"].workers:\n", - " res = domain_client.api.services.worker.delete(uid=worker.id, force=True)\n", + "for worker in datasite_client.worker_pools[\"second-opendp-pool\"].workers:\n", + " res = datasite_client.api.services.worker.delete(uid=worker.id, force=True)\n", " assert isinstance(res, sy.SyftSuccess), str(res)\n", "\n", - "assert len(domain_client.worker_pools[\"second-opendp-pool\"].workers) == 0" + "assert len(datasite_client.worker_pools[\"second-opendp-pool\"].workers) == 0" ] }, { @@ -1265,7 +1266,7 @@ "metadata": {}, "outputs": [], "source": [ - "delete_res = domain_client.api.services.worker_image.remove(workerimage_2.id)\n", + "delete_res = datasite_client.api.services.worker_image.remove(workerimage_2.id)\n", "delete_res" ] }, @@ -1296,7 +1297,7 @@ "outputs": [], "source": [ "custom_dockerfile_str_3 = f\"\"\"\n", - "FROM openmined/grid-backend:{syft_base_worker_tag}\n", + "FROM openmined/syft-backend:{syft_base_worker_tag}\n", "\n", "RUN uv pip install recordlinkage\n", "\"\"\".strip()\n", @@ -1315,7 +1316,7 @@ "source": [ "recordlinkage_pool_name = \"recordlinkage-pool\"\n", "pool_image_create_request = (\n", - " domain_client.api.services.worker_pool.create_image_and_pool_request(\n", + " datasite_client.api.services.worker_pool.create_image_and_pool_request(\n", " pool_name=recordlinkage_pool_name,\n", " num_workers=2,\n", " tag=docker_tag_3,\n", @@ -1370,7 +1371,7 @@ "outputs": [], "source": [ "# Get updated request object and status\n", - "for req in domain_client.requests:\n", + "for req in datasite_client.requests:\n", " if req.id == pool_image_create_request.id:\n", " pool_image_create_request = req\n", "\n", @@ -1387,7 +1388,7 @@ "image_exists = False\n", "recordlinkage_image = None\n", "\n", - "for im in domain_client.images.get_all():\n", + "for im in datasite_client.images.get_all():\n", " if im.image_identifier and im.image_identifier.repo_with_tag == docker_tag_3:\n", " image_exists = True\n", " recordlinkage_image = im\n", @@ -1403,7 +1404,7 @@ "metadata": 
{}, "outputs": [], "source": [ - "recordlinkage_pool = domain_client.worker_pools[recordlinkage_pool_name]\n", + "recordlinkage_pool = datasite_client.worker_pools[recordlinkage_pool_name]\n", "\n", "assert recordlinkage_pool\n", "assert len(recordlinkage_pool.workers) == 2" @@ -1425,7 +1426,7 @@ "outputs": [], "source": [ "for worker in recordlinkage_pool.workers:\n", - " res = domain_client.api.services.worker.delete(uid=worker.id, force=True)\n", + " res = datasite_client.api.services.worker.delete(uid=worker.id, force=True)\n", " assert isinstance(res, sy.SyftSuccess), str(res)" ] }, @@ -1444,7 +1445,7 @@ "metadata": {}, "outputs": [], "source": [ - "delete_res = domain_client.api.services.worker_image.remove(recordlinkage_image.id)\n", + "delete_res = datasite_client.api.services.worker_image.remove(recordlinkage_image.id)\n", "delete_res" ] }, @@ -1455,7 +1456,7 @@ "metadata": {}, "outputs": [], "source": [ - "domain.land()" + "datasite.land()" ] }, { diff --git a/notebooks/api/0.8/11-container-images-k8s.ipynb b/notebooks/api/0.8/11-container-images-k8s.ipynb index 5b78acff06e..68620d9c114 100644 --- a/notebooks/api/0.8/11-container-images-k8s.ipynb +++ b/notebooks/api/0.8/11-container-images-k8s.ipynb @@ -82,8 +82,8 @@ "os.environ[\"DEV_MODE\"] = \"True\"\n", "\n", "# Uncomment this to add custom values\n", - "# os.environ[\"NODE_URL\"] = \"http://localhost\"\n", - "# os.environ[\"NODE_PORT\"] = \"8080\"" + "# os.environ[\"SERVER_URL\"] = \"http://localhost\"\n", + "# os.environ[\"SERVER_PORT\"] = \"8080\"" ] }, { @@ -93,8 +93,8 @@ "metadata": {}, "outputs": [], "source": [ - "domain = sy.orchestra.launch(\n", - " name=\"test-domain-1\",\n", + "datasite = sy.orchestra.launch(\n", + " name=\"test-datasite-1\",\n", " dev_mode=True,\n", ")" ] @@ -106,8 +106,8 @@ "metadata": {}, "outputs": [], "source": [ - "domain_client = domain.login(email=\"info@openmined.org\", password=\"changethis\")\n", - "domain_client" + "datasite_client = datasite.login(email=\"info@openmined.org\", password=\"changethis\")\n", + "datasite_client" ] }, { @@ -133,7 +133,7 @@ "metadata": {}, "outputs": [], "source": [ - "domain_client.worker_pools" + "datasite_client.worker_pools" ] }, { @@ -151,7 +151,7 @@ "metadata": {}, "outputs": [], "source": [ - "result = domain_client.api.services.worker_pool.scale(\n", + "result = datasite_client.api.services.worker_pool.scale(\n", " number=3, pool_name=\"default-pool\"\n", ")\n", "assert not isinstance(result, sy.SyftError), str(result)\n", @@ -165,7 +165,7 @@ "metadata": {}, "outputs": [], "source": [ - "result = domain_client.api.services.worker_pool.get_by_name(pool_name=\"default-pool\")\n", + "result = datasite_client.api.services.worker_pool.get_by_name(pool_name=\"default-pool\")\n", "assert len(result.workers) == 3, str(result.to_dict())\n", "result" ] @@ -199,7 +199,7 @@ "metadata": {}, "outputs": [], "source": [ - "default_pool_scale_res = domain_client.api.services.worker_pool.scale(\n", + "default_pool_scale_res = datasite_client.api.services.worker_pool.scale(\n", " number=1, pool_name=\"default-pool\"\n", ")\n", "assert not isinstance(default_pool_scale_res, sy.SyftError), str(default_pool_scale_res)\n", @@ -213,7 +213,7 @@ "metadata": {}, "outputs": [], "source": [ - "result = domain_client.api.services.worker_pool.get_by_name(pool_name=\"default-pool\")\n", + "result = datasite_client.api.services.worker_pool.get_by_name(pool_name=\"default-pool\")\n", "assert len(result.workers) == 1, str(result.to_dict())\n", "result" ] @@ -225,7 +225,7 @@ 
"metadata": {}, "outputs": [], "source": [ - "default_worker_pool = domain_client.api.services.worker_pool.get_by_name(\n", + "default_worker_pool = datasite_client.api.services.worker_pool.get_by_name(\n", " pool_name=\"default-pool\"\n", ")\n", "default_worker_pool" @@ -250,7 +250,7 @@ "from syft.util.util import get_latest_tag\n", "\n", "registry = os.getenv(\"SYFT_BASE_IMAGE_REGISTRY\", \"docker.io\")\n", - "repo = \"openmined/grid-backend\"\n", + "repo = \"openmined/syft-backend\"\n", "\n", "if \"k3d\" in registry:\n", " tag = get_latest_tag(registry, repo)\n", @@ -300,7 +300,7 @@ "metadata": {}, "outputs": [], "source": [ - "submit_result = domain_client.api.services.worker_image.submit(\n", + "submit_result = datasite_client.api.services.worker_image.submit(\n", " worker_config=docker_config\n", ")\n", "submit_result" @@ -323,7 +323,7 @@ "metadata": {}, "outputs": [], "source": [ - "dockerfile_list = domain_client.images.get_all()\n", + "dockerfile_list = datasite_client.images.get_all()\n", "dockerfile_list" ] }, @@ -389,7 +389,7 @@ "metadata": {}, "outputs": [], "source": [ - "registry_add_result = domain_client.api.services.image_registry.add(external_registry)\n", + "registry_add_result = datasite_client.api.services.image_registry.add(external_registry)\n", "registry_add_result" ] }, @@ -410,7 +410,7 @@ "metadata": {}, "outputs": [], "source": [ - "image_registry_list = domain_client.api.services.image_registry.get_all()\n", + "image_registry_list = datasite_client.api.services.image_registry.get_all()\n", "image_registry_list" ] }, @@ -474,7 +474,7 @@ "docker_tag = \"openmined/custom-worker:0.7.8\"\n", "\n", "\n", - "docker_build_result = domain_client.api.services.worker_image.build(\n", + "docker_build_result = datasite_client.api.services.worker_image.build(\n", " image_uid=workerimage.id,\n", " tag=docker_tag,\n", " registry_uid=registry_uid,\n", @@ -499,7 +499,7 @@ "metadata": {}, "outputs": [], "source": [ - "image_list = domain_client.images.get_all()\n", + "image_list = datasite_client.images.get_all()\n", "image_list" ] }, @@ -545,7 +545,7 @@ "outputs": [], "source": [ "push_result = None\n", - "push_result = domain_client.api.services.worker_image.push(\n", + "push_result = datasite_client.api.services.worker_image.push(\n", " workerimage.id,\n", " username=external_registry_username,\n", " password=external_registry_password,\n", @@ -603,7 +603,7 @@ "worker_pool_name = \"custom-pool\"\n", "custom_pool_pod_annotations = {\"test-custom-pool\": \"Test annotation for custom pool\"}\n", "custom_pool_pod_labels = {\"test-custom-pool\": \"test_label_for_custom_pool\"}\n", - "worker_pool_res = domain_client.api.services.worker_pool.launch(\n", + "worker_pool_res = datasite_client.api.services.worker_pool.launch(\n", " pool_name=worker_pool_name,\n", " image_uid=workerimage.id,\n", " num_workers=3,\n", @@ -643,7 +643,7 @@ "metadata": {}, "outputs": [], "source": [ - "worker_pool_list = domain_client.worker_pools.get_all()\n", + "worker_pool_list = datasite_client.worker_pools.get_all()\n", "worker_pool_list" ] }, @@ -711,7 +711,7 @@ "outputs": [], "source": [ "# We can filter pools based on the image id upon which the pools were built\n", - "filtered_result = domain_client.api.services.worker_pool.filter_by_image_id(\n", + "filtered_result = datasite_client.api.services.worker_pool.filter_by_image_id(\n", " image_uid=workerimage.id\n", ")\n", "filtered_result" @@ -753,7 +753,7 @@ "metadata": {}, "outputs": [], "source": [ - "worker_logs = 
domain_client.api.services.worker.logs(\n", + "worker_logs = datasite_client.api.services.worker.logs(\n", " uid=second_worker.id,\n", ")\n", "worker_logs" @@ -797,7 +797,7 @@ "data = np.array([1, 2, 3])\n", "data_action_obj = sy.ActionObject.from_obj(data)\n", "\n", - "data_pointer = data_action_obj.send(domain_client)\n", + "data_pointer = data_action_obj.send(datasite_client)\n", "data_pointer" ] }, @@ -848,7 +848,7 @@ "metadata": {}, "outputs": [], "source": [ - "request = domain_client.code.request_code_execution(custom_worker_func)\n", + "request = datasite_client.code.request_code_execution(custom_worker_func)\n", "request" ] }, @@ -859,7 +859,7 @@ "metadata": {}, "outputs": [], "source": [ - "domain_client.requests[-1].approve(approve_nested=True)" + "datasite_client.requests[-1].approve(approve_nested=True)" ] }, { @@ -869,7 +869,7 @@ "metadata": {}, "outputs": [], "source": [ - "job = domain_client.code.custom_worker_func(x=data_pointer, blocking=False)\n", + "job = datasite_client.code.custom_worker_func(x=data_pointer, blocking=False)\n", "job" ] }, @@ -880,7 +880,7 @@ "metadata": {}, "outputs": [], "source": [ - "worker_pool = domain_client.worker_pools[worker_pool_name]\n", + "worker_pool = datasite_client.worker_pools[worker_pool_name]\n", "worker_pool" ] }, @@ -911,7 +911,7 @@ "metadata": {}, "outputs": [], "source": [ - "job_list = domain_client.jobs.get_by_user_code_id(job.user_code_id)" + "job_list = datasite_client.jobs.get_by_user_code_id(job.user_code_id)" ] }, { @@ -956,7 +956,7 @@ "outputs": [], "source": [ "# Scale Down the workers\n", - "custom_pool_scale_res = domain_client.api.services.worker_pool.scale(\n", + "custom_pool_scale_res = datasite_client.api.services.worker_pool.scale(\n", " number=1, pool_name=worker_pool_name\n", ")\n", "assert not isinstance(custom_pool_scale_res, sy.SyftError), str(custom_pool_scale_res)\n", @@ -970,7 +970,7 @@ "metadata": {}, "outputs": [], "source": [ - "assert len(domain_client.worker_pools[worker_pool_name].worker_list) == 1" + "assert len(datasite_client.worker_pools[worker_pool_name].worker_list) == 1" ] }, { @@ -1005,7 +1005,7 @@ "outputs": [], "source": [ "submit_result = None\n", - "submit_result = domain_client.api.services.worker_image.submit(\n", + "submit_result = datasite_client.api.services.worker_image.submit(\n", " worker_config=docker_config_opendp\n", ")\n", "submit_result" @@ -1028,7 +1028,7 @@ "metadata": {}, "outputs": [], "source": [ - "_images = domain_client.images\n", + "_images = datasite_client.images\n", "assert not isinstance(_images, sy.SyftError), str(_images)" ] }, @@ -1063,7 +1063,7 @@ "source": [ "docker_tag_opendp = \"openmined/custom-worker-opendp:latest\"\n", "\n", - "docker_build_result = domain_client.api.services.worker_image.build(\n", + "docker_build_result = datasite_client.api.services.worker_image.build(\n", " image_uid=workerimage_opendp.id,\n", " tag=docker_tag_opendp,\n", " registry_uid=registry_uid,\n", @@ -1089,7 +1089,7 @@ "metadata": {}, "outputs": [], "source": [ - "_images = domain_client.images\n", + "_images = datasite_client.images\n", "assert not isinstance(_images, sy.SyftError), str(_images)" ] }, @@ -1125,7 +1125,7 @@ "source": [ "# Push OpenDP Image to registry\n", "push_result = None\n", - "push_result = domain_client.api.services.worker_image.push(\n", + "push_result = datasite_client.api.services.worker_image.push(\n", " workerimage_opendp.id,\n", " username=external_registry_username,\n", " password=external_registry_password,\n", @@ -1143,7 +1143,7 @@ 
"pool_name_opendp = \"opendp-pool\"\n", "opendp_pod_annotations = {\"test-opendp-pool\": \"Test annotation for opendp pool\"}\n", "opendp_pod_labels = {\"test-opendp-pool\": \"test_label_for_opendp_pool\"}\n", - "pool_create_request = domain_client.api.services.worker_pool.pool_creation_request(\n", + "pool_create_request = datasite_client.api.services.worker_pool.pool_creation_request(\n", " pool_name=pool_name_opendp,\n", " num_workers=3,\n", " image_uid=workerimage_opendp.id,\n", @@ -1196,7 +1196,7 @@ "metadata": {}, "outputs": [], "source": [ - "pool_opendp = domain_client.worker_pools[pool_name_opendp]\n", + "pool_opendp = datasite_client.worker_pools[pool_name_opendp]\n", "assert not isinstance(pool_opendp, sy.SyftError), str(pool_opendp)\n", "assert len(pool_opendp.worker_list) == 3" ] @@ -1208,7 +1208,7 @@ "metadata": {}, "outputs": [], "source": [ - "worker_pool_list = domain_client.worker_pools.get_all()\n", + "worker_pool_list = datasite_client.worker_pools.get_all()\n", "\n", "assert not isinstance(worker_pool_list, sy.SyftError), str(worker_pool_list)\n", "assert len(worker_pool_list) == 3" @@ -1251,7 +1251,7 @@ "outputs": [], "source": [ "# Scale Down the workers\n", - "opendp_pool_scale_res = domain_client.api.services.worker_pool.scale(\n", + "opendp_pool_scale_res = datasite_client.api.services.worker_pool.scale(\n", " number=1, pool_name=pool_name_opendp\n", ")\n", "assert not isinstance(opendp_pool_scale_res, sy.SyftError), str(opendp_pool_scale_res)\n", @@ -1265,7 +1265,7 @@ "metadata": {}, "outputs": [], "source": [ - "assert len(domain_client.worker_pools[pool_name_opendp].worker_list) == 1" + "assert len(datasite_client.worker_pools[pool_name_opendp].worker_list) == 1" ] }, { @@ -1308,7 +1308,7 @@ "recordlinkage_pod_labels = {\n", " \"test-recordlinkage-pool\": \"test_label_for_recordlinkage_pool\"\n", "}\n", - "pool_image_create_request = domain_client.api.services.worker_pool.create_image_and_pool_request(\n", + "pool_image_create_request = datasite_client.api.services.worker_pool.create_image_and_pool_request(\n", " pool_name=pool_name_recordlinkage,\n", " num_workers=2,\n", " tag=docker_tag_recordlinkage,\n", @@ -1377,7 +1377,7 @@ "metadata": {}, "outputs": [], "source": [ - "_requests = domain_client.requests\n", + "_requests = datasite_client.requests\n", "assert not isinstance(_requests, sy.SyftError), str(_requests)" ] }, @@ -1430,7 +1430,7 @@ "metadata": {}, "outputs": [], "source": [ - "domain_client.images" + "datasite_client.images" ] }, { @@ -1441,7 +1441,7 @@ "outputs": [], "source": [ "image_exists = False\n", - "for im in domain_client.images.get_all():\n", + "for im in datasite_client.images.get_all():\n", " if (\n", " im.image_identifier\n", " and im.image_identifier.repo_with_tag == docker_tag_recordlinkage\n", @@ -1458,8 +1458,8 @@ "metadata": {}, "outputs": [], "source": [ - "assert domain_client.worker_pools[pool_name_recordlinkage]\n", - "assert len(domain_client.worker_pools[pool_name_recordlinkage].worker_list) == 2" + "assert datasite_client.worker_pools[pool_name_recordlinkage]\n", + "assert len(datasite_client.worker_pools[pool_name_recordlinkage].worker_list) == 2" ] }, { @@ -1470,7 +1470,7 @@ "outputs": [], "source": [ "# Scale down the workers\n", - "recordlinkage_pool_scale_res = domain_client.api.services.worker_pool.scale(\n", + "recordlinkage_pool_scale_res = datasite_client.api.services.worker_pool.scale(\n", " number=1, pool_name=pool_name_recordlinkage\n", ")\n", "assert not isinstance(recordlinkage_pool_scale_res, sy.SyftError), 
str(\n", @@ -1486,7 +1486,7 @@ "metadata": {}, "outputs": [], "source": [ - "assert len(domain_client.worker_pools[pool_name_recordlinkage].worker_list) == 1" + "assert len(datasite_client.worker_pools[pool_name_recordlinkage].worker_list) == 1" ] } ], diff --git a/notebooks/api/0.8/12-custom-api-endpoint.ipynb b/notebooks/api/0.8/12-custom-api-endpoint.ipynb index 20c269ea286..478446bcd17 100644 --- a/notebooks/api/0.8/12-custom-api-endpoint.ipynb +++ b/notebooks/api/0.8/12-custom-api-endpoint.ipynb @@ -11,7 +11,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Initialize the Node" + "### Initialize the Server" ] }, { @@ -28,8 +28,8 @@ "from syft import SyftError\n", "from syft import SyftSuccess\n", "\n", - "node = sy.orchestra.launch(\n", - " name=\"test-domain-1\",\n", + "server = sy.orchestra.launch(\n", + " name=\"test-datasite-1\",\n", " dev_mode=True,\n", " create_producer=True,\n", " n_consumers=3,\n", @@ -37,14 +37,14 @@ " port=8081,\n", ")\n", "\n", - "domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")\n", - "domain_client.register(\n", + "datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")\n", + "datasite_client.register(\n", " email=\"user@openmined.org\",\n", " password=\"verysecurepassword\",\n", " password_verify=\"verysecurepassword\",\n", " name=\"New User\",\n", ")\n", - "domain_guest = node.login(email=\"user@openmined.org\", password=\"verysecurepassword\")" + "datasite_guest = server.login(email=\"user@openmined.org\", password=\"verysecurepassword\")" ] }, { @@ -53,7 +53,7 @@ "source": [ "### Create a public custom API Endpoint by using the decorator\n", "\n", - "This allows node admin to create a new public endpoint by using only the decorator." + "This allows server admin to create a new public endpoint by using only the decorator." 
] }, { @@ -73,8 +73,8 @@ " return context.settings[\"key\"] == \"value\"\n", "\n", "\n", - "# Add it to the node.\n", - "response = domain_client.api.services.api.add(endpoint=public_endpoint_method)\n", + "# Add it to the server.\n", + "response = datasite_client.api.services.api.add(endpoint=public_endpoint_method)\n", "response" ] }, @@ -93,7 +93,7 @@ "metadata": {}, "outputs": [], "source": [ - "domain_client.api.services.api.api_endpoints()" + "datasite_client.api.services.api.api_endpoints()" ] }, { @@ -102,7 +102,7 @@ "metadata": {}, "outputs": [], "source": [ - "assert len(domain_client.api.services.api.api_endpoints()) == 1" + "assert len(datasite_client.api.services.api.api_endpoints()) == 1" ] }, { @@ -112,8 +112,8 @@ "outputs": [], "source": [ "# Once api refresh is done, remove this cell\n", - "domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")\n", - "domain_guest = node.login(email=\"user@openmined.org\", password=\"verysecurepassword\")" + "datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")\n", + "datasite_guest = server.login(email=\"user@openmined.org\", password=\"verysecurepassword\")" ] }, { @@ -122,7 +122,7 @@ "metadata": {}, "outputs": [], "source": [ - "assert domain_client.api.services.first.query(query=\"SELECT *\")" + "assert datasite_client.api.services.first.query(query=\"SELECT *\")" ] }, { @@ -131,7 +131,7 @@ "metadata": {}, "outputs": [], "source": [ - "result = domain_guest.api.services.first.query(query=\"SELECT *\")\n", + "result = datasite_guest.api.services.first.query(query=\"SELECT *\")\n", "result" ] }, @@ -171,8 +171,8 @@ " description=\"Lore ipsulum ...\",\n", ")\n", "\n", - "# # Add it to the node.\n", - "response = domain_client.api.services.api.add(endpoint=new_endpoint)" + "# # Add it to the server.\n", + "response = datasite_client.api.services.api.add(endpoint=new_endpoint)" ] }, { @@ -181,7 +181,7 @@ "metadata": {}, "outputs": [], "source": [ - "domain_client.api.services.api.api_endpoints()" + "datasite_client.api.services.api.api_endpoints()" ] }, { @@ -191,7 +191,7 @@ "outputs": [], "source": [ "assert isinstance(response, SyftSuccess)\n", - "assert len(domain_client.api.services.api.api_endpoints()) == 2" + "assert len(datasite_client.api.services.api.api_endpoints()) == 2" ] }, { @@ -201,8 +201,8 @@ "outputs": [], "source": [ "# Once api refresh is done, remove this cell\n", - "domain_client.refresh()\n", - "domain_guest.refresh()" + "datasite_client.refresh()\n", + "datasite_guest.refresh()" ] }, { @@ -211,7 +211,7 @@ "metadata": {}, "outputs": [], "source": [ - "domain_client.api.services.third.query()" + "datasite_client.api.services.third.query()" ] }, { @@ -220,7 +220,7 @@ "metadata": {}, "outputs": [], "source": [ - "assert domain_client.api.services.third.query() == \"Private Function Execution\"" + "assert datasite_client.api.services.third.query() == \"Private Function Execution\"" ] }, { @@ -229,7 +229,7 @@ "metadata": {}, "outputs": [], "source": [ - "assert domain_guest.api.services.third.query() == \"Public Function Execution\"" + "assert datasite_guest.api.services.third.query() == \"Public Function Execution\"" ] }, { @@ -238,7 +238,7 @@ "metadata": {}, "outputs": [], "source": [ - "domain_guest.api.services.third.query()" + "datasite_guest.api.services.third.query()" ] }, { @@ -248,7 +248,7 @@ "outputs": [], "source": [ "@sy.syft_function_single_use(\n", - " endpoint=domain_guest.api.services.third.query,\n", + " 
endpoint=datasite_guest.api.services.third.query,\n", ")\n", "def job_function(endpoint):\n", " return endpoint()\n", @@ -258,10 +258,10 @@ "new_project = sy.Project(\n", " name=\"My Cool UN Project\",\n", " description=\"Hi, I want to calculate the trade volume in million's with my cool code.\",\n", - " members=[domain_guest],\n", + " members=[datasite_guest],\n", ")\n", "\n", - "result = new_project.create_code_request(job_function, domain_guest)\n", + "result = new_project.create_code_request(job_function, datasite_guest)\n", "assert isinstance(result, SyftSuccess)" ] }, @@ -272,7 +272,7 @@ "outputs": [], "source": [ "res = None\n", - "for r in domain_client.requests.get_all():\n", + "for r in datasite_client.requests.get_all():\n", " if r.requesting_user_email == \"user@openmined.org\":\n", " res = r.approve()\n", "assert res is not None, res\n", @@ -285,7 +285,9 @@ "metadata": {}, "outputs": [], "source": [ - "result = domain_guest.code.job_function(endpoint=domain_client.api.services.third.query)\n", + "result = datasite_guest.code.job_function(\n", + " endpoint=datasite_client.api.services.third.query\n", + ")\n", "result" ] }, @@ -295,7 +297,9 @@ "metadata": {}, "outputs": [], "source": [ - "result = domain_guest.code.job_function(endpoint=domain_client.api.services.third.query)\n", + "result = datasite_guest.code.job_function(\n", + " endpoint=datasite_client.api.services.third.query\n", + ")\n", "result" ] }, @@ -305,7 +309,7 @@ "metadata": {}, "outputs": [], "source": [ - "domain_client.api.services.third.query" + "datasite_client.api.services.third.query" ] }, { @@ -314,7 +318,9 @@ "metadata": {}, "outputs": [], "source": [ - "result = domain_guest.code.job_function(endpoint=domain_client.api.services.third.query)\n", + "result = datasite_guest.code.job_function(\n", + " endpoint=datasite_client.api.services.third.query\n", + ")\n", "result" ] }, @@ -342,7 +348,7 @@ "metadata": {}, "outputs": [], "source": [ - "assert isinstance(domain_guest.api.services.third.query.private(), SyftError)" + "assert isinstance(datasite_guest.api.services.third.query.private(), SyftError)" ] }, { @@ -351,7 +357,7 @@ "metadata": {}, "outputs": [], "source": [ - "result = domain_client.api.services.api.delete(endpoint_path=\"third.query\")\n", + "result = datasite_client.api.services.api.delete(endpoint_path=\"third.query\")\n", "assert isinstance(result, SyftSuccess), result" ] }, @@ -361,7 +367,7 @@ "metadata": {}, "outputs": [], "source": [ - "assert len(domain_client.api.services.api.api_endpoints()) == 1" + "assert len(datasite_client.api.services.api.api_endpoints()) == 1" ] }, { @@ -395,8 +401,8 @@ " return context.settings[\"key\"] == \"value\"\n", "\n", "\n", - "# Add it to the node.\n", - "response = domain_client.api.services.api.add(endpoint=new_public_function)\n", + "# Add it to the server.\n", + "response = datasite_client.api.services.api.add(endpoint=new_public_function)\n", "assert isinstance(response, SyftSuccess), response\n", "response" ] @@ -421,7 +427,7 @@ " return \"Updated Public Function Execution\"\n", "\n", "\n", - "response = domain_client.api.services.api.update(\n", + "response = datasite_client.api.services.api.update(\n", " endpoint_path=\"test.update\", mock_function=updated_public_function\n", ")\n", "assert isinstance(response, SyftSuccess), response\n", @@ -448,7 +454,7 @@ " return \"Updated Private Function Execution\"\n", "\n", "\n", - "response = domain_client.api.services.api.update(\n", + "response = datasite_client.api.services.api.update(\n", " 
endpoint_path=\"test.update\", private_function=updated_private_function\n", ")\n", "assert isinstance(response, SyftSuccess), response\n", @@ -478,7 +484,7 @@ " return \"Updated Private Function Execution\"\n", "\n", "\n", - "response = domain_client.api.services.api.update(\n", + "response = datasite_client.api.services.api.update(\n", " endpoint_path=\"test.update\",\n", " mock_function=new_sig_public_function,\n", " private_function=new_sig_private_function,\n", @@ -510,7 +516,7 @@ "metadata": {}, "outputs": [], "source": [ - "response = domain_client.api.services.api.update(endpoint_path=\"test.update\")\n", + "response = datasite_client.api.services.api.update(endpoint_path=\"test.update\")\n", "assert isinstance(response, SyftError), response" ] }, @@ -532,7 +538,7 @@ " return \"Updated Public Function Execution\"\n", "\n", "\n", - "response = domain_client.api.services.api.update(\n", + "response = datasite_client.api.services.api.update(\n", " endpoint_path=\"test.update\", mock_function=bad_public_function\n", ")\n", "assert isinstance(response, SyftError), response" @@ -551,7 +557,7 @@ "metadata": {}, "outputs": [], "source": [ - "response = domain_client.api.services.api.update(\n", + "response = datasite_client.api.services.api.update(\n", " endpoint_path=\"nonexistent\", mock_function=bad_public_function\n", ")\n", "assert isinstance(response, SyftError), response" diff --git a/notebooks/experimental/Digital Signatures/ds-benchmarks.ipynb b/notebooks/experimental/Digital Signatures/ds-benchmarks.ipynb new file mode 100644 index 00000000000..5ff803cdce6 --- /dev/null +++ b/notebooks/experimental/Digital Signatures/ds-benchmarks.ipynb @@ -0,0 +1,429 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 5, + "id": "f272a63f-03a9-417d-88c3-11a98ad25c80", + "metadata": {}, + "outputs": [], + "source": [ + "data = b\"A\" * (10**9) # 1GB message" + ] + }, + { + "cell_type": "markdown", + "id": "d9a2e0c0-ef3b-41be-a4e8-0d9f190a1106", + "metadata": {}, + "source": [ + "# Using PyNacl" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a4145072-a959-479b-8c80-da15f82946f3", + "metadata": {}, + "outputs": [], + "source": [ + "# stdlib\n", + "import hashlib\n", + "import time\n", + "\n", + "# third party\n", + "from nacl.signing import SigningKey\n", + "\n", + "# Generate a new random signing key\n", + "signing_key = SigningKey.generate()\n", + "\n", + "# Example large message\n", + "large_message = data\n", + "\n", + "# Hash the message with SHA-256 using hashlib\n", + "start = time.time()\n", + "hash_object = hashlib.sha256()\n", + "hash_object.update(large_message)\n", + "hashed_message = hash_object.digest()\n", + "hash_time = time.time() - start\n", + "\n", + "# Sign the hashed message with PyNaCl\n", + "start = time.time()\n", + "signed_hash = signing_key.sign(hashed_message)\n", + "sign_time = time.time() - start\n", + "\n", + "# Directly sign the large message with PyNaCl\n", + "start = time.time()\n", + "signed_message = signing_key.sign(large_message)\n", + "direct_sign_time = time.time() - start\n", + "\n", + "print(f\"Time to hash with hashlib: {hash_time:.2f} seconds\")\n", + "print(f\"Time to sign hashed message with PyNaCl: {sign_time:.2f} seconds\")\n", + "print(f\"Total time (hash + sign): {hash_time + sign_time:.2f} seconds\")\n", + "print(\n", + " f\"Time to directly sign large message with PyNaCl: {direct_sign_time:.2f} seconds\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "d8581767-bee2-42e1-a571-148cf0fb12a4", + 
"metadata": {}, + "source": [ + "# Using Cryptography library" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8ea32e21-8987-4459-aa0f-6bc832376ab7", + "metadata": {}, + "outputs": [], + "source": [ + "# !pip install cryptography" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1618c35b-cb6e-4f28-a13c-a2e23497841c", + "metadata": {}, + "outputs": [], + "source": [ + "# third party\n", + "%%time\n", + "# third party\n", + "from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey\n", + "\n", + "private_key = Ed25519PrivateKey.generate()\n", + "signature = private_key.sign(data)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9abb35b3-1891-4074-8f0e-729de0c2e4a2", + "metadata": {}, + "outputs": [], + "source": [ + "# third party\n", + "%%time\n", + "# third party\n", + "from cryptography.hazmat.primitives.asymmetric.ed448 import Ed448PrivateKey\n", + "\n", + "private_key = Ed448PrivateKey.generate()\n", + "signature = private_key.sign(data)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "66341fe5-94c3-4c8e-af34-a2e837a6957f", + "metadata": {}, + "outputs": [], + "source": [ + "# third party\n", + "%%time\n", + "# third party\n", + "from cryptography.hazmat.primitives import hashes\n", + "from cryptography.hazmat.primitives.asymmetric import dsa\n", + "\n", + "private_key = dsa.generate_private_key(\n", + " key_size=1024,\n", + ")\n", + "signature = private_key.sign(data, hashes.SHA256())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "83362239-6376-46ee-8e70-d9a23ff5421b", + "metadata": {}, + "outputs": [], + "source": [ + "%%time\n", + "\n", + "# third party\n", + "from cryptography.hazmat.primitives import hashes\n", + "from cryptography.hazmat.primitives.asymmetric import ec\n", + "\n", + "private_key = ec.generate_private_key(ec.SECP384R1())\n", + "\n", + "signature = private_key.sign(data, ec.ECDSA(hashes.SHA256()))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cd5d1781-666e-4d19-aee9-c0ad4b8f0756", + "metadata": {}, + "outputs": [], + "source": [ + "%%time\n", + "public_key = private_key.public_key()\n", + "public_key.verify(signature, data, ec.ECDSA(hashes.SHA256()))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "206369da-d2c7-424c-b5c6-b1d9b5202786", + "metadata": {}, + "outputs": [], + "source": [ + "# third party\n", + "%%time\n", + "# third party\n", + "from cryptography.hazmat.primitives import hashes\n", + "from cryptography.hazmat.primitives.asymmetric import padding\n", + "from cryptography.hazmat.primitives.asymmetric import rsa\n", + "\n", + "private_key = rsa.generate_private_key(\n", + " public_exponent=65537,\n", + " key_size=2048,\n", + ")\n", + "\n", + "message = data\n", + "signature = private_key.sign(\n", + " message,\n", + " padding.PSS(mgf=padding.MGF1(hashes.SHA256()), salt_length=padding.PSS.MAX_LENGTH),\n", + " hashes.SHA256(),\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b222c11a-e2a2-4610-a9d8-95ee3343d466", + "metadata": {}, + "outputs": [], + "source": [ + "%%time\n", + "public_key = private_key.public_key()\n", + "message = data\n", + "public_key.verify(\n", + " signature,\n", + " message,\n", + " padding.PSS(mgf=padding.MGF1(hashes.SHA256()), salt_length=padding.PSS.MAX_LENGTH),\n", + " hashes.SHA256(),\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "6fa46875-4405-47c6-855c-0b3f407aa26c", + "metadata": {}, + 
"source": [ + "# Hashing by PyNacl" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ea204831-482d-4d3a-988b-32920b7af285", + "metadata": {}, + "outputs": [], + "source": [ + "# third party\n", + "import nacl.encoding\n", + "import nacl.hash\n", + "\n", + "methods = [\"sha256\", \"sha512\", \"blake2b\"]\n", + "\n", + "for hash_method in methods:\n", + " HASHER = getattr(nacl.hash, hash_method)\n", + "\n", + " start = time.time()\n", + " digest = HASHER(data, encoder=nacl.encoding.HexEncoder)\n", + " end = time.time()\n", + " print(f\"Time taken for {hash_method}\", end - start)" + ] + }, + { + "cell_type": "markdown", + "id": "df81c37d-024e-4de8-a136-717f2e67e724", + "metadata": {}, + "source": [ + "# Hashing by cryptography library" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2a775385-6b57-46ab-9aed-51598a8c7592", + "metadata": {}, + "outputs": [], + "source": [ + "# third party\n", + "from cryptography.hazmat.primitives import hashes\n", + "\n", + "methods = [\"SHA256\", \"SHA512\", \"BLAKE2b\"]\n", + "\n", + "for hash_method in methods:\n", + " if hash_method == \"BLAKE2b\":\n", + " digest = hashes.Hash(getattr(hashes, hash_method)(64))\n", + " else:\n", + " digest = hashes.Hash(getattr(hashes, hash_method)())\n", + "\n", + " start = time.time()\n", + " digest.update(data)\n", + " digest.finalize()\n", + " end = time.time()\n", + " print(f\"Time taken for {hash_method}\", end - start)" + ] + }, + { + "cell_type": "markdown", + "id": "086ab235-d9a0-4184-8270-bffb088bf1c3", + "metadata": {}, + "source": [ + "# Hashing by python hashlib" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b08d7f82-ea8f-4b24-ac09-669526894293", + "metadata": {}, + "outputs": [], + "source": [ + "methods = [\"sha256\", \"sha512\", \"blake2b\"]\n", + "\n", + "for hash_method in methods:\n", + " if hash_method == \"blake2b\":\n", + " m = getattr(hashlib, hash_method)(digest_size=64)\n", + " else:\n", + " m = getattr(hashlib, hash_method)()\n", + "\n", + " start = time.time()\n", + " m.update(data)\n", + " m.digest()\n", + " end = time.time()\n", + " print(f\"Time taken for {hash_method}\", end - start)" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "9bf2843e-add6-4f65-a75b-5ef93093d347", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Collecting pycryptodome\n", + " Downloading pycryptodome-3.20.0-cp35-abi3-macosx_10_9_universal2.whl.metadata (3.4 kB)\n", + "Downloading pycryptodome-3.20.0-cp35-abi3-macosx_10_9_universal2.whl (2.4 MB)\n", + "\u001b[2K \u001b[38;2;114;156;31m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.4/2.4 MB\u001b[0m \u001b[31m11.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m31m12.5 MB/s\u001b[0m eta \u001b[36m0:00:01\u001b[0m\n", + "\u001b[?25hInstalling collected packages: pycryptodome\n", + "Successfully installed pycryptodome-3.20.0\n", + "\n", + "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m24.0\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.1.2\u001b[0m\n", + "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n" + ] + } + ], + "source": [ + "!pip install pycryptodome" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "4343bedd-308a-4caf-a4ff-56cdd3ca2433", + "metadata": {}, + "outputs": [ + { + "name": 
"stdout", + "output_type": "stream", + "text": [ + "Public Key:\n", + "-----BEGIN PUBLIC KEY-----\n", + "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEz1vchLT61W1+TWg86POU/jsYS4IJ\n", + "IzeBv+mYc9Ehpn0MqCpri5l0+HbnIpLAdvO7KeYRGBRqFPJMjqt5rB30Aw==\n", + "-----END PUBLIC KEY-----\n", + "\n", + "Private Key:\n", + "-----BEGIN PRIVATE KEY-----\n", + "MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgSIn/SVjK1hLXs5XK\n", + "S7C+dB1YcSz9VqStzP1ytSL9y7ihRANCAATPW9yEtPrVbX5NaDzo85T+OxhLggkj\n", + "N4G/6Zhz0SGmfQyoKmuLmXT4duciksB287sp5hEYFGoU8kyOq3msHfQD\n", + "-----END PRIVATE KEY-----\n", + "\n", + "Signature:\n", + "108b92beb9b85840c39e217373c998fb6df71baabb6a39cae6088f4a1f920d66694b1a71df082d930f58d91e83b72eee6aaa77f865796a78671d5bb74d384866\n", + "CPU times: user 4.9 s, sys: 41.8 ms, total: 4.94 s\n", + "Wall time: 4.94 s\n" + ] + } + ], + "source": [ + "# third party\n", + "from Crypto.Hash import SHA256\n", + "\n", + "%%time\n", + "# third party\n", + "from Crypto.PublicKey import ECC\n", + "from Crypto.Signature import DSS\n", + "\n", + "# Generate a new ECC key pair\n", + "key = ECC.generate(curve=\"P-256\")\n", + "\n", + "# Export the public key in PEM format\n", + "public_key_pem = key.public_key().export_key(format=\"PEM\")\n", + "print(\"Public Key:\")\n", + "print(public_key_pem)\n", + "\n", + "# Export the private key in PEM format\n", + "private_key_pem = key.export_key(format=\"PEM\")\n", + "print(\"\\nPrivate Key:\")\n", + "print(private_key_pem)\n", + "\n", + "# Sign a message\n", + "message = data\n", + "hash_obj = SHA256.new(message)\n", + "signer = DSS.new(key, \"fips-186-3\")\n", + "signature = signer.sign(hash_obj)\n", + "print(\"\\nSignature:\")\n", + "print(signature.hex())\n", + "\n", + "# # Verify the signature\n", + "# public_key = ECC.import_key(public_key_pem)\n", + "# verifier = DSS.new(public_key, 'fips-186-3')\n", + "# try:\n", + "# verifier.verify(hash_obj, signature)\n", + "# print(\"\\nThe message is authentic.\")\n", + "# except ValueError:\n", + "# print(\"\\nThe message is not authentic.\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2034a8fd-c89e-461f-805b-5b37c4c7d395", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.8" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/experimental/enclaves/Action-Object-Performance.ipynb b/notebooks/experimental/enclaves/Action-Object-Performance.ipynb new file mode 100644 index 00000000000..a34b2d229f1 --- /dev/null +++ b/notebooks/experimental/enclaves/Action-Object-Performance.ipynb @@ -0,0 +1,150 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "e0141faa-33db-4d35-95c1-7eaa38061223", + "metadata": {}, + "outputs": [], + "source": [ + "# syft absolute\n", + "import syft as sy" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "61be8866-cf01-4577-bdf0-4114995eb39a", + "metadata": {}, + "outputs": [], + "source": [ + "canada_server = sy.orchestra.launch(\n", + " name=\"canada-domain\", port=8081, dev_mode=True, reset=True, profile=True\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": 
"d509c48c-96a1-4706-8dbd-27143eeba41b", + "metadata": {}, + "outputs": [], + "source": [ + "domain_client = canada_server.login(email=\"info@openmined.org\", password=\"changethis\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c6b619a7-427e-4eee-9a24-4c65dee67e6f", + "metadata": {}, + "outputs": [], + "source": [ + "data = b\"A\" * (2**28) # 1GB message" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1e15f7e3-8f0c-4981-b9bf-9ed2ba308282", + "metadata": {}, + "outputs": [], + "source": [ + "len(data) / 2**20" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "27a9c748-2d96-4970-bc84-76077bc326a8", + "metadata": {}, + "outputs": [], + "source": [ + "%%pyinstrument\n", + "action_obj = sy.ActionObject.from_obj(data)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ce834e46-f181-486f-bf0f-6082d7b85281", + "metadata": {}, + "outputs": [], + "source": [ + "# %%pyinstrument\n", + "# res = action_obj.send(domain_client)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ef814db6-b86f-48d3-8b07-d99499650d05", + "metadata": {}, + "outputs": [], + "source": [ + "# res" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9dc5cb5b-0e6a-46d5-87a4-f1c0084c77bb", + "metadata": {}, + "outputs": [], + "source": [ + "# domain_client.api.services.action.get_hash(res.id)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f7838dce-57fe-494e-942d-738f931f5697", + "metadata": {}, + "outputs": [], + "source": [ + "# %%time\n", + "# val = domain_client.api.services.action.get(res.id)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "66db2667-04f4-4b79-b5a8-f52ee0bca233", + "metadata": {}, + "outputs": [], + "source": [ + "# %%time\n", + "# _ = val.syft_action_data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9e20b517-7ac4-4979-8198-883c03129fb0", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.8" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/experimental/enclaves/Hash.ipynb b/notebooks/experimental/enclaves/Hash.ipynb new file mode 100644 index 00000000000..0a0fd636d9f --- /dev/null +++ b/notebooks/experimental/enclaves/Hash.ipynb @@ -0,0 +1,171 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "4e901f03-66aa-4ee9-8665-866a261cb298", + "metadata": {}, + "outputs": [], + "source": [ + "# third party\n", + "import numpy as np\n", + "\n", + "# syft absolute\n", + "import syft as sy" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "1fda9bfc-c8a1-48be-9567-02b4075b13d5", + "metadata": {}, + "outputs": [], + "source": [ + "a1 = sy.ActionObject.from_obj(np.array([1, 2, 3]))" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "e0d25819-5367-4328-acd7-3bcaa8428c69", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'a9ec1f75a9003848fa9886e4cc2b7333b99d089af97ec6e0f774ba8bf92a1226'" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "a1.hash()" + ] + }, + { + 
"cell_type": "code", + "execution_count": 11, + "id": "20f5fff0-846d-4df2-a3d0-cfd615693f39", + "metadata": {}, + "outputs": [], + "source": [ + "b1 = sy.ActionObject.from_obj(id=a1.id, syft_action_data=np.array([1, 2, 3]))" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "be93aaba-2d5f-46a0-9bd1-0f09b70e4c08", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'a9ec1f75a9003848fa9886e4cc2b7333b99d089af97ec6e0f774ba8bf92a1226'" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "b1.hash()" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "008146b5-8ef2-4043-bf77-05c2c4295561", + "metadata": {}, + "outputs": [], + "source": [ + "a2 = sy.ActionObject.from_obj(5)" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "40d2c07b-458d-44ab-bbc3-c5808263df85", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'8ee92c1a13aecee2cd3c4be229b01dc87d32236163f0b57f1277fe36a9e5dba9'" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "a2.hash()" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "821afc65-db28-45b5-ba9d-52bf82c19e03", + "metadata": {}, + "outputs": [], + "source": [ + "b2 = sy.ActionObject.from_obj(id=a2.id, syft_action_data=5)" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "df7dc61b-7f7b-47da-8119-34fbd0c272dc", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'8ee92c1a13aecee2cd3c4be229b01dc87d32236163f0b57f1277fe36a9e5dba9'" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "b2.hash()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fddfdadb-4d7c-4f24-9ff0-15c7e4f9164c", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.8" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/experimental/enclaves/V1/00-do-setup-domain.ipynb b/notebooks/experimental/enclaves/V1/00-do-setup-domain.ipynb index 0cbeaa20897..de366a74e7e 100644 --- a/notebooks/experimental/enclaves/V1/00-do-setup-domain.ipynb +++ b/notebooks/experimental/enclaves/V1/00-do-setup-domain.ipynb @@ -19,11 +19,11 @@ "\n", "## Data Owners Workflow - Part 1\n", "[./00-do-setup-domain.ipynb](./00-do-setup-domain.ipynb)\n", - "- Launch two domain nodes for providing data to the data scientist.\n", - "- Launch one enclave node for performing the secure computation using data from both the domain nodes.\n", - "- Upload datasets to both the domain nodes.\n", - "- Register an account for the data scientist in both the domain nodes.\n", - "- Register the enclave node with one of the domain nodes for discoverability by the data scientist.\n", + "- Launch two domain servers for providing data to the data scientist.\n", + "- Launch one enclave server for performing the secure computation using data from both the domain servers.\n", + "- Upload datasets to both the domain servers.\n", + "- Register an account for the data scientist in both the domain servers.\n", + "- Register the enclave 
server with one of the domain servers for discoverability by the data scientist.\n", "\n", "## Data Scientist Workflow - Part 1\n", "[./01-ds-submit-project.ipynb](./01-ds-submit-project.ipynb)\n", @@ -58,8 +58,8 @@ "\n", "# syft absolute\n", "import syft as sy\n", - "from syft.abstract_node import NodeType\n", - "from syft.service.network.routes import HTTPNodeRoute\n", + "from syft.abstract_server import ServerType\n", + "from syft.service.network.routes import HTTPServerRoute\n", "from syft.service.response import SyftAttributeError\n", "from syft.service.response import SyftSuccess\n", "\n", @@ -72,9 +72,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Launch nodes\n", + "# Launch servers\n", "\n", - "We will begin by launching two domain nodes and an enclave node." + "We will begin by launching two domain servers and an enclave server." ] }, { @@ -83,15 +83,15 @@ "metadata": {}, "outputs": [], "source": [ - "canada_node = sy.orchestra.launch(\n", + "canada_server = sy.orchestra.launch(\n", " name=\"canada-domain\", port=CANADA_DOMAIN_PORT, dev_mode=True, reset=True\n", ")\n", - "italy_node = sy.orchestra.launch(\n", + "italy_server = sy.orchestra.launch(\n", " name=\"italy-domain\", port=ITALY_DOMAIN_PORT, dev_mode=True, reset=True\n", ")\n", "canada_enclave = sy.orchestra.launch(\n", " name=\"canada-enclave\",\n", - " node_type=NodeType.ENCLAVE,\n", + " server_type=ServerType.ENCLAVE,\n", " port=CANADA_ENCLAVE_PORT,\n", " dev_mode=True,\n", " reset=True,\n", @@ -104,11 +104,13 @@ "metadata": {}, "outputs": [], "source": [ - "do_canada_client = canada_node.login(email=\"info@openmined.org\", password=\"changethis\")\n", - "do_italy_client = italy_node.login(email=\"info@openmined.org\", password=\"changethis\")\n", + "do_canada_client = canada_server.login(\n", + " email=\"info@openmined.org\", password=\"changethis\"\n", + ")\n", + "do_italy_client = italy_server.login(email=\"info@openmined.org\", password=\"changethis\")\n", "\n", - "assert do_canada_client.metadata.node_type == NodeType.DOMAIN\n", - "assert do_italy_client.metadata.node_type == NodeType.DOMAIN" + "assert do_canada_client.metadata.server_type == ServerType.DOMAIN\n", + "assert do_italy_client.metadata.server_type == ServerType.DOMAIN" ] }, { @@ -202,7 +204,7 @@ "metadata": {}, "outputs": [], "source": [ - "route = HTTPNodeRoute(host_or_ip=\"localhost\", port=CANADA_ENCLAVE_PORT)\n", + "route = HTTPServerRoute(host_or_ip=\"localhost\", port=CANADA_ENCLAVE_PORT)\n", "do_canada_client.enclaves.add(route=route)" ] }, @@ -236,7 +238,7 @@ "# Data scientist should not be able to add enclave to the domain\n", "with pytest.raises(SyftAttributeError) as exc_info:\n", " ds_canada_client.enclaves.add(\n", - " name=\"Dummy Enclave\", route=HTTPNodeRoute(host_or_ip=\"localhost\", port=9084)\n", + " name=\"Dummy Enclave\", route=HTTPServerRoute(host_or_ip=\"localhost\", port=9084)\n", " )\n", "print(exc_info.value)" ] @@ -265,11 +267,11 @@ "metadata": {}, "outputs": [], "source": [ - "if canada_node.deployment_type.value == \"python\":\n", - " canada_node.land()\n", + "if canada_server.deployment_type.value == \"python\":\n", + " canada_server.land()\n", "\n", - "if italy_node.deployment_type.value == \"python\":\n", - " italy_node.land()\n", + "if italy_server.deployment_type.value == \"python\":\n", + " italy_server.land()\n", "\n", "if canada_enclave.deployment_type.value == \"python\":\n", " canada_enclave.land()" diff --git a/notebooks/experimental/enclaves/V1/01-ds-submit-project.ipynb 
b/notebooks/experimental/enclaves/V1/01-ds-submit-project.ipynb index 786b30cff55..8ea9b677d7d 100644 --- a/notebooks/experimental/enclaves/V1/01-ds-submit-project.ipynb +++ b/notebooks/experimental/enclaves/V1/01-ds-submit-project.ipynb @@ -40,7 +40,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Log in to the domain nodes as a data scientist" + "# Log in to the domain servers as a data scientist" ] }, { @@ -49,11 +49,11 @@ "metadata": {}, "outputs": [], "source": [ - "# Launch the domain nodes we setup in the previous notebook\n", - "canada_node = sy.orchestra.launch(\n", + "# Launch the domain servers we setup in the previous notebook\n", + "canada_server = sy.orchestra.launch(\n", " name=\"canada-domain\", port=CANADA_DOMAIN_PORT, dev_mode=True\n", ")\n", - "italy_node = sy.orchestra.launch(\n", + "italy_server = sy.orchestra.launch(\n", " name=\"italy-domain\", port=ITALY_DOMAIN_PORT, dev_mode=True\n", ")" ] @@ -64,8 +64,10 @@ "metadata": {}, "outputs": [], "source": [ - "ds_canada_client = canada_node.login(email=\"sheldon@caltech.edu\", password=\"changethis\")\n", - "ds_italy_client = italy_node.login(email=\"sheldon@caltech.edu\", password=\"changethis\")" + "ds_canada_client = canada_server.login(\n", + " email=\"sheldon@caltech.edu\", password=\"changethis\"\n", + ")\n", + "ds_italy_client = italy_server.login(email=\"sheldon@caltech.edu\", password=\"changethis\")" ] }, { @@ -220,8 +222,8 @@ "metadata": {}, "outputs": [], "source": [ - "assert project.requests[0].node_uid == ds_canada_client.id\n", - "assert project.requests[1].node_uid == ds_italy_client.id" + "assert project.requests[0].server_uid == ds_canada_client.id\n", + "assert project.requests[1].server_uid == ds_italy_client.id" ] }, { @@ -230,7 +232,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Test: data scientist should not be able to request execution without approval of all nodes\n", + "# Test: data scientist should not be able to request execution without approval of all servers\n", "assert project.pending_requests != 0 # There are pending requests\n", "with pytest.raises(SyftException) as exc_info:\n", " project.request_execution(blocking=True)\n", @@ -262,11 +264,11 @@ "metadata": {}, "outputs": [], "source": [ - "if canada_node.deployment_type.value == \"python\":\n", - " canada_node.land()\n", + "if canada_server.deployment_type.value == \"python\":\n", + " canada_server.land()\n", "\n", - "if italy_node.deployment_type.value == \"python\":\n", - " italy_node.land()" + "if italy_server.deployment_type.value == \"python\":\n", + " italy_server.land()" ] } ], diff --git a/notebooks/experimental/enclaves/V1/02-do-review-code.ipynb b/notebooks/experimental/enclaves/V1/02-do-review-code.ipynb index 88b09189c95..4dc3452f817 100644 --- a/notebooks/experimental/enclaves/V1/02-do-review-code.ipynb +++ b/notebooks/experimental/enclaves/V1/02-do-review-code.ipynb @@ -35,7 +35,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Log in to the first domain node as the data owner" + "# Log in to the first domain server as the data owner" ] }, { @@ -44,8 +44,8 @@ "metadata": {}, "outputs": [], "source": [ - "# Launch the domain nodes we setup in the previous notebook\n", - "canada_node = sy.orchestra.launch(\n", + "# Launch the domain servers we setup in the previous notebook\n", + "canada_server = sy.orchestra.launch(\n", " name=\"canada-domain\", port=CANADA_DOMAIN_PORT, dev_mode=True\n", ")" ] @@ -56,7 +56,9 @@ "metadata": {}, "outputs": [], "source": [ - "do_canada_client = 
canada_node.login(email=\"info@openmined.org\", password=\"changethis\")" + "do_canada_client = canada_server.login(\n", + " email=\"info@openmined.org\", password=\"changethis\"\n", + ")" ] }, { @@ -168,7 +170,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Approve the code from the other domain node" + "# Approve the code from the other domain server" ] }, { @@ -177,10 +179,10 @@ "metadata": {}, "outputs": [], "source": [ - "italy_node = sy.orchestra.launch(\n", + "italy_server = sy.orchestra.launch(\n", " name=\"italy-domain\", port=ITALY_DOMAIN_PORT, dev_mode=True\n", ")\n", - "do_italy_client = italy_node.login(email=\"info@openmined.org\", password=\"changethis\")\n", + "do_italy_client = italy_server.login(email=\"info@openmined.org\", password=\"changethis\")\n", "\n", "projects = do_italy_client.projects.get_all()\n", "assert len(projects) == 1\n", @@ -210,11 +212,11 @@ "metadata": {}, "outputs": [], "source": [ - "if canada_node.deployment_type.value == \"python\":\n", - " canada_node.land()\n", + "if canada_server.deployment_type.value == \"python\":\n", + " canada_server.land()\n", "\n", - "if italy_node.deployment_type.value == \"python\":\n", - " italy_node.land()" + "if italy_server.deployment_type.value == \"python\":\n", + " italy_server.land()" ] } ], diff --git a/notebooks/experimental/enclaves/V1/03-ds-execute-code.ipynb b/notebooks/experimental/enclaves/V1/03-ds-execute-code.ipynb index 82a9768b4be..4ab56eb29a9 100644 --- a/notebooks/experimental/enclaves/V1/03-ds-execute-code.ipynb +++ b/notebooks/experimental/enclaves/V1/03-ds-execute-code.ipynb @@ -26,7 +26,7 @@ "source": [ "# syft absolute\n", "import syft as sy\n", - "from syft.abstract_node import NodeType\n", + "from syft.abstract_server import ServerType\n", "\n", "CANADA_DOMAIN_PORT = 9081\n", "ITALY_DOMAIN_PORT = 9082\n", @@ -39,16 +39,16 @@ "metadata": {}, "outputs": [], "source": [ - "# Launch the domain nodes we setup in the previous notebook\n", - "canada_node = sy.orchestra.launch(\n", + "# Launch the domain servers we setup in the previous notebook\n", + "canada_server = sy.orchestra.launch(\n", " name=\"canada-domain\", port=CANADA_DOMAIN_PORT, dev_mode=True\n", ")\n", - "italy_node = sy.orchestra.launch(\n", + "italy_server = sy.orchestra.launch(\n", " name=\"italy-domain\", port=ITALY_DOMAIN_PORT, dev_mode=True\n", ")\n", "canada_enclave = sy.orchestra.launch(\n", " name=\"canada-enclave\",\n", - " node_type=NodeType.ENCLAVE,\n", + " server_type=ServerType.ENCLAVE,\n", " port=CANADA_ENCLAVE_PORT,\n", " dev_mode=True,\n", " create_producer=True,\n", @@ -63,8 +63,10 @@ "metadata": {}, "outputs": [], "source": [ - "ds_canada_client = canada_node.login(email=\"sheldon@caltech.edu\", password=\"changethis\")\n", - "ds_italy_client = italy_node.login(email=\"sheldon@caltech.edu\", password=\"changethis\")" + "ds_canada_client = canada_server.login(\n", + " email=\"sheldon@caltech.edu\", password=\"changethis\"\n", + ")\n", + "ds_italy_client = italy_server.login(email=\"sheldon@caltech.edu\", password=\"changethis\")" ] }, { @@ -124,11 +126,11 @@ "metadata": {}, "outputs": [], "source": [ - "if canada_node.deployment_type.value == \"python\":\n", - " canada_node.land()\n", + "if canada_server.deployment_type.value == \"python\":\n", + " canada_server.land()\n", "\n", - "if italy_node.deployment_type.value == \"python\":\n", - " italy_node.land()\n", + "if italy_server.deployment_type.value == \"python\":\n", + " italy_server.land()\n", "\n", "if canada_enclave.deployment_type.value == 
\"python\":\n", " canada_enclave.land()" diff --git a/notebooks/experimental/enclaves/V2/00-do-setup-domain.ipynb b/notebooks/experimental/enclaves/V2/00-do-setup-domain.ipynb index 3f5f17c1afc..fc215ade52e 100644 --- a/notebooks/experimental/enclaves/V2/00-do-setup-domain.ipynb +++ b/notebooks/experimental/enclaves/V2/00-do-setup-domain.ipynb @@ -19,11 +19,11 @@ "\n", "## Data Owners Workflow - Part 1\n", "[./00-do-setup-domain.ipynb](./00-do-setup-domain.ipynb)\n", - "- Launch two domain nodes for providing data to the data scientist.\n", - "- Launch one enclave node for performing the secure computation using data from both the domain nodes.\n", - "- Upload datasets to both the domain nodes.\n", - "- Register an account for the data scientist in both the domain nodes.\n", - "- Register the enclave node with one of the domain nodes for discoverability by the data scientist.\n", + "- Launch two domain servers for providing data to the data scientist.\n", + "- Launch one enclave server for performing the secure computation using data from both the domain servers.\n", + "- Upload datasets to both the domain servers.\n", + "- Register an account for the data scientist in both the domain servers.\n", + "- Register the enclave server with one of the domain servers for discoverability by the data scientist.\n", "\n", "## Data Scientist Workflow - Part 1\n", "[./01-ds-submit-project.ipynb](./01-ds-submit-project.ipynb)\n", @@ -58,8 +58,8 @@ "\n", "# syft absolute\n", "import syft as sy\n", - "from syft.abstract_node import NodeType\n", - "from syft.service.network.routes import HTTPNodeRoute\n", + "from syft.abstract_server import ServerType\n", + "from syft.service.network.routes import HTTPServerRoute\n", "from syft.service.response import SyftAttributeError\n", "from syft.service.response import SyftSuccess\n", "\n", @@ -72,9 +72,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Launch nodes\n", + "# Launch servers\n", "\n", - "We will begin by launching two domain nodes and an enclave node." + "We will begin by launching two domain servers and an enclave server." 
] }, { @@ -83,15 +83,15 @@ "metadata": {}, "outputs": [], "source": [ - "canada_node = sy.orchestra.launch(\n", + "canada_server = sy.orchestra.launch(\n", " name=\"canada-domain\", port=CANADA_DOMAIN_PORT, dev_mode=True, reset=True\n", ")\n", - "italy_node = sy.orchestra.launch(\n", + "italy_server = sy.orchestra.launch(\n", " name=\"italy-domain\", port=ITALY_DOMAIN_PORT, dev_mode=True, reset=True\n", ")\n", "canada_enclave = sy.orchestra.launch(\n", " name=\"canada-enclave\",\n", - " node_type=NodeType.ENCLAVE,\n", + " server_type=ServerType.ENCLAVE,\n", " port=CANADA_ENCLAVE_PORT,\n", " dev_mode=True,\n", " reset=True,\n", @@ -104,11 +104,13 @@ "metadata": {}, "outputs": [], "source": [ - "do_canada_client = canada_node.login(email=\"info@openmined.org\", password=\"changethis\")\n", - "do_italy_client = italy_node.login(email=\"info@openmined.org\", password=\"changethis\")\n", + "do_canada_client = canada_server.login(\n", + " email=\"info@openmined.org\", password=\"changethis\"\n", + ")\n", + "do_italy_client = italy_server.login(email=\"info@openmined.org\", password=\"changethis\")\n", "\n", - "assert do_canada_client.metadata.node_type == NodeType.DOMAIN\n", - "assert do_italy_client.metadata.node_type == NodeType.DOMAIN" + "assert do_canada_client.metadata.server_type == ServerType.DOMAIN\n", + "assert do_italy_client.metadata.server_type == ServerType.DOMAIN" ] }, { @@ -202,7 +204,7 @@ "metadata": {}, "outputs": [], "source": [ - "route = HTTPNodeRoute(host_or_ip=\"localhost\", port=CANADA_ENCLAVE_PORT)\n", + "route = HTTPServerRoute(host_or_ip=\"localhost\", port=CANADA_ENCLAVE_PORT)\n", "do_canada_client.enclaves.add(route=route)" ] }, @@ -236,7 +238,7 @@ "# Data scientist should not be able to add enclave to the domain\n", "with pytest.raises(SyftAttributeError) as exc_info:\n", " ds_canada_client.enclaves.add(\n", - " name=\"Dummy Enclave\", route=HTTPNodeRoute(host_or_ip=\"localhost\", port=9084)\n", + " name=\"Dummy Enclave\", route=HTTPServerRoute(host_or_ip=\"localhost\", port=9084)\n", " )\n", "print(exc_info.value)" ] @@ -265,11 +267,11 @@ "metadata": {}, "outputs": [], "source": [ - "if canada_node.deployment_type.value == \"python\":\n", - " canada_node.land()\n", + "if canada_server.deployment_type.value == \"python\":\n", + " canada_server.land()\n", "\n", - "if italy_node.deployment_type.value == \"python\":\n", - " italy_node.land()\n", + "if italy_server.deployment_type.value == \"python\":\n", + " italy_server.land()\n", "\n", "if canada_enclave.deployment_type.value == \"python\":\n", " canada_enclave.land()" diff --git a/notebooks/experimental/enclaves/V2/01-connect-domains.ipynb b/notebooks/experimental/enclaves/V2/01-connect-domains.ipynb index da03fd9c57c..656c2abc900 100644 --- a/notebooks/experimental/enclaves/V2/01-connect-domains.ipynb +++ b/notebooks/experimental/enclaves/V2/01-connect-domains.ipynb @@ -13,11 +13,11 @@ "\n", "3. The Data Scientist is able to create a project if and only if , all the domains could talk to each other. (i.e they have beeen previously associated)\n", "\n", - "4. This would mean that in our semi-Decentralized, leader based system , when ever a node would like to add an event/message to the project, it has to be sent to the leader, which is then broadcasted to all the other nodes.\n", + "4. 
This would mean that in our semi-Decentralized, leader based system , when ever a server would like to add an event/message to the project, it has to be sent to the leader, which is then broadcasted to all the other servers.\n", "\n", - "5. For other situations, for example when a node would like to asset metadata of another node in a Multi Domain User Code request, they could directly contact the node, to retrieve the info, instead of going through the leader.\n", + "5. For other situations, for example when a server would like to asset metadata of another server in a Multi Domain User Code request, they could directly contact the server, to retrieve the info, instead of going through the leader.\n", "\n", - "6. This would require us to create a Full Mesh Network Topology, where each node is connected to each other." + "6. This would require us to create a Full Mesh Network Topology, where each server is connected to each other." ] }, { @@ -28,8 +28,8 @@ "source": [ "# syft absolute\n", "import syft as sy\n", - "from syft.abstract_node import NodeType\n", - "from syft.service.network.node_peer import NodePeer\n", + "from syft.abstract_server import ServerType\n", + "from syft.service.network.server_peer import ServerPeer\n", "\n", "CANADA_DOMAIN_PORT = 9081\n", "ITALY_DOMAIN_PORT = 9082" @@ -39,9 +39,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Launch nodes\n", + "# Launch servers\n", "\n", - "We will begin by launching two domain nodes and an enclave node." + "We will begin by launching two domain servers and an enclave server." ] }, { @@ -50,12 +50,12 @@ "metadata": {}, "outputs": [], "source": [ - "canada_node = sy.orchestra.launch(\n", + "canada_server = sy.orchestra.launch(\n", " name=\"canada-domain\",\n", " port=CANADA_DOMAIN_PORT,\n", " dev_mode=True,\n", ")\n", - "italy_node = sy.orchestra.launch(\n", + "italy_server = sy.orchestra.launch(\n", " name=\"italy-domain\",\n", " port=ITALY_DOMAIN_PORT,\n", " dev_mode=True,\n", @@ -68,11 +68,13 @@ "metadata": {}, "outputs": [], "source": [ - "ds_canada_client = canada_node.login(email=\"sheldon@caltech.edu\", password=\"changethis\")\n", - "ds_italy_client = italy_node.login(email=\"sheldon@caltech.edu\", password=\"changethis\")\n", + "ds_canada_client = canada_server.login(\n", + " email=\"sheldon@caltech.edu\", password=\"changethis\"\n", + ")\n", + "ds_italy_client = italy_server.login(email=\"sheldon@caltech.edu\", password=\"changethis\")\n", "\n", - "assert ds_canada_client.metadata.node_type == NodeType.DOMAIN\n", - "assert ds_italy_client.metadata.node_type == NodeType.DOMAIN" + "assert ds_canada_client.metadata.server_type == ServerType.DOMAIN\n", + "assert ds_italy_client.metadata.server_type == ServerType.DOMAIN" ] }, { @@ -88,8 +90,8 @@ "metadata": {}, "outputs": [], "source": [ - "canada_node_peer = NodePeer.from_client(ds_canada_client)\n", - "canada_node_peer" + "canada_server_peer = ServerPeer.from_client(ds_canada_client)\n", + "canada_server_peer" ] }, { @@ -98,8 +100,8 @@ "metadata": {}, "outputs": [], "source": [ - "italy_node_peer = NodePeer.from_client(ds_italy_client)\n", - "italy_node_peer" + "italy_server_peer = ServerPeer.from_client(ds_italy_client)\n", + "italy_server_peer" ] }, { @@ -108,7 +110,7 @@ "metadata": {}, "outputs": [], "source": [ - "canada_conn_req = ds_canada_client.api.services.network.add_peer(italy_node_peer)\n", + "canada_conn_req = ds_canada_client.api.services.network.add_peer(italy_server_peer)\n", "canada_conn_req" ] }, @@ -118,7 +120,7 @@ "metadata": {}, "outputs": 
[], "source": [ - "italy_conn_req = ds_italy_client.api.services.network.add_peer(canada_node_peer)\n", + "italy_conn_req = ds_italy_client.api.services.network.add_peer(canada_server_peer)\n", "italy_conn_req" ] }, @@ -135,8 +137,10 @@ "metadata": {}, "outputs": [], "source": [ - "do_canada_client = canada_node.login(email=\"info@openmined.org\", password=\"changethis\")\n", - "do_italy_client = italy_node.login(email=\"info@openmined.org\", password=\"changethis\")" + "do_canada_client = canada_server.login(\n", + " email=\"info@openmined.org\", password=\"changethis\"\n", + ")\n", + "do_italy_client = italy_server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -203,7 +207,7 @@ "outputs": [], "source": [ "# syft absolute\n", - "from syft.service.project.project import check_route_reachability" + "from syft.service.network.utils import check_route_reachability" ] }, { @@ -228,11 +232,11 @@ "metadata": {}, "outputs": [], "source": [ - "if canada_node.deployment_type.value == \"python\":\n", - " canada_node.land()\n", + "if canada_server.deployment_type.value == \"python\":\n", + " canada_server.land()\n", "\n", - "if italy_node.deployment_type.value == \"python\":\n", - " italy_node.land()" + "if italy_server.deployment_type.value == \"python\":\n", + " italy_server.land()" ] }, { @@ -259,7 +263,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.8" + "version": "3.11.7" } }, "nbformat": 4, diff --git a/notebooks/experimental/enclaves/V2/02-ds-submit-project.ipynb b/notebooks/experimental/enclaves/V2/02-ds-submit-project.ipynb index ba6d6fc0147..105b26d2ade 100644 --- a/notebooks/experimental/enclaves/V2/02-ds-submit-project.ipynb +++ b/notebooks/experimental/enclaves/V2/02-ds-submit-project.ipynb @@ -37,7 +37,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Log in to the domain nodes as a data scientist" + "# Log in to the domain servers as a data scientist" ] }, { @@ -46,11 +46,11 @@ "metadata": {}, "outputs": [], "source": [ - "# Launch the domain nodes we setup in the previous notebook\n", - "canada_node = sy.orchestra.launch(\n", + "# Launch the domain servers we setup in the previous notebook\n", + "canada_server = sy.orchestra.launch(\n", " name=\"canada-domain\", port=CANADA_DOMAIN_PORT, dev_mode=True\n", ")\n", - "italy_node = sy.orchestra.launch(\n", + "italy_server = sy.orchestra.launch(\n", " name=\"italy-domain\", port=ITALY_DOMAIN_PORT, dev_mode=True\n", ")" ] @@ -61,8 +61,10 @@ "metadata": {}, "outputs": [], "source": [ - "ds_canada_client = canada_node.login(email=\"sheldon@caltech.edu\", password=\"changethis\")\n", - "ds_italy_client = italy_node.login(email=\"sheldon@caltech.edu\", password=\"changethis\")" + "ds_canada_client = canada_server.login(\n", + " email=\"sheldon@caltech.edu\", password=\"changethis\"\n", + ")\n", + "ds_italy_client = italy_server.login(email=\"sheldon@caltech.edu\", password=\"changethis\")" ] }, { @@ -252,11 +254,11 @@ "metadata": {}, "outputs": [], "source": [ - "if canada_node.deployment_type.value == \"python\":\n", - " canada_node.land()\n", + "if canada_server.deployment_type.value == \"python\":\n", + " canada_server.land()\n", "\n", - "if italy_node.deployment_type.value == \"python\":\n", - " italy_node.land()" + "if italy_server.deployment_type.value == \"python\":\n", + " italy_server.land()" ] }, { diff --git a/notebooks/experimental/enclaves/V2/03-do-review-code.ipynb b/notebooks/experimental/enclaves/V2/03-do-review-code.ipynb index 
97d88f05b6f..9b4a5dfd1d3 100644 --- a/notebooks/experimental/enclaves/V2/03-do-review-code.ipynb +++ b/notebooks/experimental/enclaves/V2/03-do-review-code.ipynb @@ -44,12 +44,12 @@ "metadata": {}, "outputs": [], "source": [ - "# Launch the domain nodes we setup in the previous notebook\n", - "canada_node = sy.orchestra.launch(\n", + "# Launch the domain servers we setup in the previous notebook\n", + "canada_server = sy.orchestra.launch(\n", " name=\"canada-domain\", port=CANADA_DOMAIN_PORT, dev_mode=True\n", ")\n", "\n", - "italy_node = sy.orchestra.launch(\n", + "italy_server = sy.orchestra.launch(\n", " name=\"italy-domain\", port=ITALY_DOMAIN_PORT, dev_mode=True\n", ")" ] @@ -58,7 +58,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Log in to the first domain node as the data owner" + "# Log in to the first domain server as the data owner" ] }, { @@ -67,7 +67,9 @@ "metadata": {}, "outputs": [], "source": [ - "do_canada_client = canada_node.login(email=\"info@openmined.org\", password=\"changethis\")" + "do_canada_client = canada_server.login(\n", + " email=\"info@openmined.org\", password=\"changethis\"\n", + ")" ] }, { @@ -222,7 +224,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Approve the code from the other domain node" + "# Approve the code from the other domain server" ] }, { @@ -231,7 +233,7 @@ "metadata": {}, "outputs": [], "source": [ - "do_italy_client = italy_node.login(email=\"info@openmined.org\", password=\"changethis\")" + "do_italy_client = italy_server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -326,11 +328,11 @@ "metadata": {}, "outputs": [], "source": [ - "if canada_node.deployment_type.value == \"python\":\n", - " canada_node.land()\n", + "if canada_server.deployment_type.value == \"python\":\n", + " canada_server.land()\n", "\n", - "if italy_node.deployment_type.value == \"python\":\n", - " italy_node.land()" + "if italy_server.deployment_type.value == \"python\":\n", + " italy_server.land()" ] }, { diff --git a/notebooks/experimental/enclaves/V2/04-ds-execute-code.ipynb b/notebooks/experimental/enclaves/V2/04-ds-execute-code.ipynb index f6337840449..a16665b9cf7 100644 --- a/notebooks/experimental/enclaves/V2/04-ds-execute-code.ipynb +++ b/notebooks/experimental/enclaves/V2/04-ds-execute-code.ipynb @@ -26,7 +26,7 @@ "source": [ "# syft absolute\n", "import syft as sy\n", - "from syft.abstract_node import NodeType\n", + "from syft.abstract_server import ServerType\n", "\n", "CANADA_DOMAIN_PORT = 9081\n", "ITALY_DOMAIN_PORT = 9082\n", @@ -39,16 +39,16 @@ "metadata": {}, "outputs": [], "source": [ - "# Launch the domain nodes we setup in the previous notebook\n", - "canada_node = sy.orchestra.launch(\n", + "# Launch the domain servers we setup in the previous notebook\n", + "canada_server = sy.orchestra.launch(\n", " name=\"canada-domain\", port=CANADA_DOMAIN_PORT, dev_mode=True\n", ")\n", - "italy_node = sy.orchestra.launch(\n", + "italy_server = sy.orchestra.launch(\n", " name=\"italy-domain\", port=ITALY_DOMAIN_PORT, dev_mode=True\n", ")\n", "canada_enclave = sy.orchestra.launch(\n", " name=\"canada-enclave\",\n", - " node_type=NodeType.ENCLAVE,\n", + " server_type=ServerType.ENCLAVE,\n", " port=CANADA_ENCLAVE_PORT,\n", " dev_mode=True,\n", " create_producer=True,\n", @@ -63,8 +63,10 @@ "metadata": {}, "outputs": [], "source": [ - "ds_canada_client = canada_node.login(email=\"sheldon@caltech.edu\", password=\"changethis\")\n", - "ds_italy_client = italy_node.login(email=\"sheldon@caltech.edu\", 
password=\"changethis\")" + "ds_canada_client = canada_server.login(\n", + " email=\"sheldon@caltech.edu\", password=\"changethis\"\n", + ")\n", + "ds_italy_client = italy_server.login(email=\"sheldon@caltech.edu\", password=\"changethis\")" ] }, { @@ -156,11 +158,11 @@ "metadata": {}, "outputs": [], "source": [ - "if canada_node.deployment_type.value == \"python\":\n", - " canada_node.land()\n", + "if canada_server.deployment_type.value == \"python\":\n", + " canada_server.land()\n", "\n", - "if italy_node.deployment_type.value == \"python\":\n", - " italy_node.land()\n", + "if italy_server.deployment_type.value == \"python\":\n", + " italy_server.land()\n", "\n", "if canada_enclave.deployment_type.value == \"python\":\n", " canada_enclave.land()" diff --git a/notebooks/experimental/enclaves/V2/V2-Enclave-Single-Notebook.ipynb b/notebooks/experimental/enclaves/V2/V2-Enclave-Single-Notebook.ipynb index 90ebdd79de6..1244ecdcb16 100644 --- a/notebooks/experimental/enclaves/V2/V2-Enclave-Single-Notebook.ipynb +++ b/notebooks/experimental/enclaves/V2/V2-Enclave-Single-Notebook.ipynb @@ -12,24 +12,24 @@ "\n", "# syft absolute\n", "import syft as sy\n", - "from syft.abstract_node import NodeType\n", + "from syft.abstract_server import ServerType\n", "from syft.service.code.user_code import UserCodeStatus\n", - "from syft.service.network.node_peer import NodePeer\n", - "from syft.service.network.routes import HTTPNodeRoute\n", + "from syft.service.network.routes import HTTPServerRoute\n", + "from syft.service.network.server_peer import ServerPeer\n", + "from syft.service.network.utils import check_route_reachability\n", "from syft.service.project.project import ProjectCode\n", - "from syft.service.project.project import check_route_reachability\n", "from syft.service.response import SyftSuccess\n", "from syft.types.uid import UID\n", "\n", - "CANADA_DOMAIN_PORT = 9081\n", - "ITALY_DOMAIN_PORT = 9082\n", + "CANADA_DATASITE_PORT = 9081\n", + "ITALY_DATASITE_PORT = 9082\n", "CANADA_ENCLAVE_HOST = None\n", "CANADA_ENCLAVE_PORT = 9083\n", "\n", "CPU_ENCLAVE = \"13.90.101.161\"\n", "GPU_ENCLAVE = \"172.176.204.136\"\n", "#! Uncomment below line to run the code on a pre-provisioned remote Enclave\n", - "CANADA_ENCLAVE_HOST = CPU_ENCLAVE" + "CANADA_ENCLAVE_HOST = None" ] }, { @@ -37,9 +37,9 @@ "id": "1", "metadata": {}, "source": [ - "# Launch nodes\n", + "# Launch servers\n", "\n", - "We will begin by launching two domain nodes and an enclave node." + "We will begin by launching two datasite servers and an enclave server." 
] }, { @@ -48,10 +48,10 @@ "metadata": {}, "source": [ "### For Kubernetes\n", - "To run the nodes in kubernetes, run the below commands and wait till the cluster becomes ready.\n", + "To run the servers in kubernetes, run the below commands and wait till the cluster becomes ready.\n", "```bash\n", - "CLUSTER_NAME=canada-domain CLUSTER_HTTP_PORT=9081 tox -e dev.k8s.launch.domain\n", - "CLUSTER_NAME=italy-domain CLUSTER_HTTP_PORT=9082 tox -e dev.k8s.launch.domain\n", + "CLUSTER_NAME=canada-datasite CLUSTER_HTTP_PORT=9081 tox -e dev.k8s.launch.datasite\n", + "CLUSTER_NAME=italy-datasite CLUSTER_HTTP_PORT=9082 tox -e dev.k8s.launch.datasite\n", "CLUSTER_NAME=canada-enclave CLUSTER_HTTP_PORT=9083 tox -e dev.k8s.launch.enclave\n", "```" ] @@ -63,15 +63,15 @@ "metadata": {}, "outputs": [], "source": [ - "canada_node = sy.orchestra.launch(\n", - " name=\"canada-domain\", port=CANADA_DOMAIN_PORT, dev_mode=True, reset=True\n", + "canada_server = sy.orchestra.launch(\n", + " name=\"canada-datasite\", port=CANADA_DATASITE_PORT, dev_mode=True, reset=True\n", ")\n", - "italy_node = sy.orchestra.launch(\n", - " name=\"italy-domain\", port=ITALY_DOMAIN_PORT, dev_mode=True, reset=True\n", + "italy_server = sy.orchestra.launch(\n", + " name=\"italy-datasite\", port=ITALY_DATASITE_PORT, dev_mode=True, reset=True\n", ")\n", "enclave_kwargs = {\n", " \"name\": \"canada-enclave\",\n", - " \"node_type\": NodeType.ENCLAVE,\n", + " \"server_type\": ServerType.ENCLAVE,\n", " \"port\": CANADA_ENCLAVE_PORT,\n", " \"create_producer\": True,\n", " \"n_consumers\": 3,\n", @@ -91,11 +91,13 @@ "metadata": {}, "outputs": [], "source": [ - "do_canada_client = canada_node.login(email=\"info@openmined.org\", password=\"changethis\")\n", - "do_italy_client = italy_node.login(email=\"info@openmined.org\", password=\"changethis\")\n", + "do_canada_client = canada_server.login(\n", + " email=\"info@openmined.org\", password=\"changethis\"\n", + ")\n", + "do_italy_client = italy_server.login(email=\"info@openmined.org\", password=\"changethis\")\n", "\n", - "assert do_canada_client.metadata.node_type == NodeType.DOMAIN\n", - "assert do_italy_client.metadata.node_type == NodeType.DOMAIN" + "assert do_canada_client.metadata.server_type == ServerType.DATASITE\n", + "assert do_italy_client.metadata.server_type == ServerType.DATASITE" ] }, { @@ -103,7 +105,7 @@ "id": "5", "metadata": {}, "source": [ - "# Upload datasets to both domains" + "# Upload datasets to both datasites" ] }, { @@ -162,7 +164,7 @@ "id": "9", "metadata": {}, "source": [ - "# Create account for data scientist on both the domains" + "# Create account for data scientist on both the datasites" ] }, { @@ -187,7 +189,7 @@ "id": "11", "metadata": {}, "source": [ - "# Register the enclave with Canada domain" + "# Register the enclave with Canada datasite" ] }, { @@ -197,7 +199,7 @@ "metadata": {}, "outputs": [], "source": [ - "route = HTTPNodeRoute(host_or_ip=canada_enclave.url, port=canada_enclave.port)\n", + "route = HTTPServerRoute(host_or_ip=canada_enclave.url, port=canada_enclave.port)\n", "do_canada_client.enclaves.add(route=route)" ] }, @@ -227,8 +229,10 @@ "metadata": {}, "outputs": [], "source": [ - "ds_canada_client = canada_node.login(email=\"sheldon@caltech.edu\", password=\"changethis\")\n", - "ds_italy_client = italy_node.login(email=\"sheldon@caltech.edu\", password=\"changethis\")" + "ds_canada_client = canada_server.login(\n", + " email=\"sheldon@caltech.edu\", password=\"changethis\"\n", + ")\n", + "ds_italy_client = 
italy_server.login(email=\"sheldon@caltech.edu\", password=\"changethis\")" ] }, { @@ -246,8 +250,8 @@ "metadata": {}, "outputs": [], "source": [ - "canada_node_peer = NodePeer.from_client(ds_canada_client)\n", - "canada_node_peer" + "canada_server_peer = ServerPeer.from_client(ds_canada_client)\n", + "canada_server_peer" ] }, { @@ -257,8 +261,8 @@ "metadata": {}, "outputs": [], "source": [ - "italy_node_peer = NodePeer.from_client(ds_italy_client)\n", - "italy_node_peer" + "italy_server_peer = ServerPeer.from_client(ds_italy_client)\n", + "italy_server_peer" ] }, { @@ -268,7 +272,7 @@ "metadata": {}, "outputs": [], "source": [ - "canada_conn_req = ds_canada_client.api.services.network.add_peer(italy_node_peer)\n", + "canada_conn_req = ds_canada_client.api.services.network.add_peer(italy_server_peer)\n", "canada_conn_req" ] }, @@ -279,7 +283,7 @@ "metadata": {}, "outputs": [], "source": [ - "italy_conn_req = ds_italy_client.api.services.network.add_peer(canada_node_peer)\n", + "italy_conn_req = ds_italy_client.api.services.network.add_peer(canada_server_peer)\n", "italy_conn_req" ] }, @@ -318,7 +322,7 @@ "id": "24", "metadata": {}, "source": [ - "# Find datasets across multiple domains" + "# Find datasets across multiple datasites" ] }, { @@ -643,7 +647,7 @@ "id": "48", "metadata": {}, "source": [ - "# Cleanup local domain servers" + "# Cleanup local datasite servers" ] }, { @@ -653,15 +657,23 @@ "metadata": {}, "outputs": [], "source": [ - "if canada_node.deployment_type.value == \"python\":\n", - " canada_node.land()\n", + "if canada_server.deployment_type.value == \"python\":\n", + " canada_server.land()\n", "\n", - "if italy_node.deployment_type.value == \"python\":\n", - " italy_node.land()\n", + "if italy_server.deployment_type.value == \"python\":\n", + " italy_server.land()\n", "\n", "if canada_enclave.deployment_type.value == \"python\":\n", " canada_enclave.land()" ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0b8747c8-5e4c-4115-aa8f-74c8a14c4461", + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { diff --git a/notebooks/experimental/enclaves/V3/V3-Enclave-Model-HostingSingle-Notebook.ipynb b/notebooks/experimental/enclaves/V3/V3-Enclave-Model-HostingSingle-Notebook.ipynb index 608e3be0eb5..ea89c0c934c 100644 --- a/notebooks/experimental/enclaves/V3/V3-Enclave-Model-HostingSingle-Notebook.ipynb +++ b/notebooks/experimental/enclaves/V3/V3-Enclave-Model-HostingSingle-Notebook.ipynb @@ -1,5 +1,15 @@ { "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "c42f9351-1ed8-4b71-9625-1ea8b9db6ee6", + "metadata": {}, + "outputs": [], + "source": [ + "# pip install transformers==4.41.2 torch==2.3.1" + ] + }, { "cell_type": "code", "execution_count": null, @@ -11,21 +21,46 @@ "\n", "# syft absolute\n", "import syft as sy\n", - "from syft.abstract_node import NodeType\n", + "from syft.abstract_server import ServerType\n", "from syft.service.code.user_code import UserCodeStatus\n", - "from syft.service.network.node_peer import NodePeer\n", - "from syft.service.network.routes import HTTPNodeRoute\n", "from syft.service.project.project import ProjectCode\n", - "from syft.service.project.project import check_route_reachability\n", "from syft.service.response import SyftSuccess\n", - "from syft.types.uid import UID\n", + "from syft.types.uid import UID" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a59a76cd-f307-4e53-9b09-1c6898dcb7fb", + "metadata": {}, + "outputs": [], + "source": [ + "# This noteboooks works 
with\n", + "# 1. in-memory workers\n", + "# 2. Local Kubernetes Clusters\n", + "# 3. Remote Kubernetes Cluster\n", + "\n", + "# *_DEPLOY_TO = \n", + "# value can be python or remote\n", + "\n", + "GLOBAL_DEPLOY_TO = \"python\" # Set this is to \"remote\" for kubernetes testing\n", + "\n", + "# CANADA_DEPLOYMENT_SETTINGS - Datasite\n", + "CANADA_DATASITE_DEPLOY_TO = GLOBAL_DEPLOY_TO\n", + "CANADA_DATASITE_HOST = \"localhost\"\n", + "CANADA_DATASITE_PORT = 9081\n", + "CANADA_DATASITE_PASSWORD = \"changethis\"\n", + "\n", + "# ITALY_DEPLOYMENT_SETTINGS - Datasite\n", + "ITALY_DATASITE_DEPLOY_TO = GLOBAL_DEPLOY_TO\n", + "ITALY_DATASITE_HOST = \"localhost\"\n", + "ITALY_DATASITE_PORT = 9082\n", + "ITALY_DATASITE_PASSWORD = \"changethis\"\n", "\n", - "CANADA_DOMAIN_PORT = 9081\n", - "ITALY_DOMAIN_PORT = 9082\n", - "CANADA_ENCLAVE_HOST = None\n", - "CANADA_ENCLAVE_PORT = 9083\n", - "#! Uncomment below line to run the code on the remote SEV-SNP CPU Enclave\n", - "# CANADA_ENCLAVE_HOST = \"13.90.101.161\"" + "# CANADA_DEPLOYMENT_SETTINGS - Enclave\n", + "CANADA_ENCLAVE_DEPLOY_TO = GLOBAL_DEPLOY_TO\n", + "CANADA_ENCLAVE_HOST = \"localhost\"\n", + "CANADA_ENCLAVE_PORT = 9083" ] }, { @@ -33,9 +68,9 @@ "id": "1", "metadata": {}, "source": [ - "# Launch nodes\n", + "# Launch servers\n", "\n", - "We will begin by launching two domain nodes and an enclave node." + "We will begin by launching two domain servers and an enclave server." ] }, { @@ -44,11 +79,20 @@ "metadata": {}, "source": [ "### For Kubernetes\n", - "To run the nodes in kubernetes, run the below commands and wait till the cluster becomes ready.\n", + "To run the servers in kubernetes, run the below commands and wait till the cluster becomes ready.\n", "```bash\n", - "CLUSTER_NAME=canada-domain CLUSTER_HTTP_PORT=9081 tox -e dev.k8s.launch.domain\n", - "CLUSTER_NAME=italy-domain CLUSTER_HTTP_PORT=9082 tox -e dev.k8s.launch.domain\n", + "CLUSTER_NAME=canada-server CLUSTER_HTTP_PORT=9081 tox -e dev.k8s.launch.datasite\n", + "CLUSTER_NAME=italy-server CLUSTER_HTTP_PORT=9082 tox -e dev.k8s.launch.datasite\n", "CLUSTER_NAME=canada-enclave CLUSTER_HTTP_PORT=9083 tox -e dev.k8s.launch.enclave\n", + "```\n", + "\n", + "To reset the servers invoke this at the root of the pysyft directory\n", + "\n", + "This is also be done in parallel shells for faster reset\n", + "```bash\n", + "./scripts/reset_k8s.sh k3d-canada-server syft\n", + "./scripts/reset_k8s.sh k3d-italy-server syft\n", + "./scripts/reset_k8s.sh k3d-canada-enclave syft\n", "```" ] }, @@ -56,28 +100,39 @@ "cell_type": "code", "execution_count": null, "id": "3", - "metadata": {}, + "metadata": { + "scrolled": true + }, "outputs": [], "source": [ - "canada_node = sy.orchestra.launch(\n", - " name=\"canada-domain\", port=CANADA_DOMAIN_PORT, dev_mode=True, reset=True\n", + "canada_server = sy.orchestra.launch(\n", + " name=\"canada-datasite\",\n", + " dev_mode=True,\n", + " reset=True,\n", + " deploy_to=CANADA_DATASITE_DEPLOY_TO,\n", + " host=CANADA_DATASITE_HOST,\n", + " port=CANADA_DATASITE_PORT,\n", ")\n", - "italy_node = sy.orchestra.launch(\n", - " name=\"italy-domain\", port=ITALY_DOMAIN_PORT, dev_mode=True, reset=True\n", + "italy_server = sy.orchestra.launch(\n", + " name=\"italy-datasite\",\n", + " dev_mode=True,\n", + " reset=True,\n", + " deploy_to=ITALY_DATASITE_DEPLOY_TO,\n", + " host=ITALY_DATASITE_HOST,\n", + " port=ITALY_DATASITE_PORT,\n", ")\n", - "enclave_kwargs = {\n", - " \"name\": \"canada-enclave\",\n", - " \"node_type\": NodeType.ENCLAVE,\n", - " \"port\": 
CANADA_ENCLAVE_PORT,\n", - " \"create_producer\": True,\n", - " \"n_consumers\": 3,\n", - " \"dev_mode\": True,\n", - " \"reset\": True,\n", - "}\n", - "if CANADA_ENCLAVE_HOST:\n", - " enclave_kwargs.update({\"deploy_to\": \"remote\", \"host\": CANADA_ENCLAVE_HOST})\n", "\n", - "canada_enclave = sy.orchestra.launch(**enclave_kwargs)" + "canada_enclave = sy.orchestra.launch(\n", + " name=\"canada-enclave\",\n", + " server_type=ServerType.ENCLAVE,\n", + " dev_mode=True,\n", + " reset=True,\n", + " create_producer=True,\n", + " n_consumers=3,\n", + " deploy_to=CANADA_ENCLAVE_DEPLOY_TO,\n", + " host=CANADA_ENCLAVE_HOST,\n", + " port=CANADA_ENCLAVE_PORT,\n", + ")" ] }, { @@ -87,11 +142,15 @@ "metadata": {}, "outputs": [], "source": [ - "do_canada_client = canada_node.login(email=\"info@openmined.org\", password=\"changethis\")\n", - "do_italy_client = italy_node.login(email=\"info@openmined.org\", password=\"changethis\")\n", + "do_canada_client = canada_server.login(\n", + " email=\"info@openmined.org\", password=CANADA_DATASITE_PASSWORD\n", + ")\n", + "do_italy_client = italy_server.login(\n", + " email=\"info@openmined.org\", password=ITALY_DATASITE_PASSWORD\n", + ")\n", "\n", - "assert do_canada_client.metadata.node_type == NodeType.DOMAIN\n", - "assert do_italy_client.metadata.node_type == NodeType.DOMAIN" + "assert do_canada_client.metadata.server_type == ServerType.DATASITE\n", + "assert do_italy_client.metadata.server_type == ServerType.DATASITE" ] }, { @@ -110,9 +169,8 @@ "outputs": [], "source": [ "@sy.syft_model(name=\"gpt2\")\n", - "class GPT2ModelCls(sy.SyftModelClass):\n", + "class GPT2Model(sy.SyftModelClass):\n", " def __user_init__(self, assets: list) -> None:\n", - " # !TODO: how does we configure the model to use the mock model folder\n", " model_folder = assets[0].model_folder\n", "\n", " # third party\n", @@ -121,6 +179,11 @@ "\n", " self.model = AutoModelForCausalLM.from_pretrained(model_folder)\n", " self.tokenizer = AutoTokenizer.from_pretrained(model_folder)\n", + " self.pad_token_id = (\n", + " self.tokenizer.pad_token_id\n", + " if self.tokenizer.pad_token_id\n", + " else self.tokenizer.eos_token_id\n", + " )\n", "\n", " def inference(self, prompt: str, raw=False, **kwargs) -> str:\n", " input_ids = self.tokenizer(prompt, return_tensors=\"pt\").input_ids\n", @@ -129,6 +192,7 @@ " do_sample=True,\n", " temperature=0.9,\n", " max_length=100,\n", + " pad_token_id=self.pad_token_id,\n", " **kwargs,\n", " )\n", " if raw:\n", @@ -149,13 +213,169 @@ "metadata": {}, "outputs": [], "source": [ - "model = sy.Model(name=\"GPT2\", code=GPT2ModelCls)\n", - "model.set_description(\n", - " \"GPT-2 is a transformers model pretrained on a very large corpus of English data in a self-supervised fashion. \"\n", - " \"This means it was pretrained on the raw texts only, with no humans labelling them in any way \"\n", - " \"(which is why it can use lots of publicly available data) with an automatic process to generate inputs and labels \"\n", - " \" from those texts. More precisely, it was trained to guess the next word in sentences.\"\n", - ")\n", + "model = sy.Model(name=\"GPT2\", code=GPT2Model)\n", + "model_card = \"\"\"\n", + "# GPT-2\n", + "\n", + "Test the whole generation capabilities here: https://transformer.huggingface.co/doc/gpt2-large\n", + "\n", + "Pretrained model on English language using a causal language modeling (CLM) objective. 
It was introduced in\n", + "[this paper](https://d4mucfpksywv.cloudfront.net/better-language-models/language_models_are_unsupervised_multitask_learners.pdf)\n", + "and first released at [this page](https://openai.com/blog/better-language-models/).\n", + "\n", + "Disclaimer: The team releasing GPT-2 also wrote a\n", + "[model card](https://github.com/openai/gpt-2/blob/master/model_card.md) for their model. Content from this model card\n", + "has been written by the Hugging Face team to complete the information they provided and give specific examples of bias.\n", + "\n", + "## Model description\n", + "\n", + "GPT-2 is a transformers model pretrained on a very large corpus of English data in a self-supervised fashion. This\n", + "means it was pretrained on the raw texts only, with no humans labelling them in any way (which is why it can use lots\n", + "of publicly available data) with an automatic process to generate inputs and labels from those texts. More precisely,\n", + "it was trained to guess the next word in sentences.\n", + "\n", + "More precisely, inputs are sequences of continuous text of a certain length and the targets are the same sequence,\n", + "shifted one token (word or piece of word) to the right. The model uses internally a mask-mechanism to make sure the\n", + "predictions for the token `i` only uses the inputs from `1` to `i` but not the future tokens.\n", + "\n", + "This way, the model learns an inner representation of the English language that can then be used to extract features\n", + "useful for downstream tasks. The model is best at what it was pretrained for however, which is generating texts from a\n", + "prompt.\n", + "\n", + "This is the **smallest** version of GPT-2, with 124M parameters.\n", + "\n", + "**Related Models:** [GPT-Large](https://huggingface.co/gpt2-large), [GPT-Medium](https://huggingface.co/gpt2-medium) and [GPT-XL](https://huggingface.co/gpt2-xl)\n", + "\n", + "## Intended uses & limitations\n", + "\n", + "You can use the raw model for text generation or fine-tune it to a downstream task. See the\n", + "[model hub](https://huggingface.co/models?filter=gpt2) to look for fine-tuned versions on a task that interests you.\n", + "\n", + "### How to use\n", + "\n", + "You can use this model directly with a pipeline for text generation. Since the generation relies on some randomness, we\n", + "set a seed for reproducibility:\n", + "\n", + "```python\n", + ">>> from transformers import pipeline, set_seed\n", + ">>> generator = pipeline('text-generation', model='gpt2')\n", + ">>> set_seed(42)\n", + ">>> generator(\"Hello, I'm a language model,\", max_length=30, num_return_sequences=5)\n", + "\n", + "[{'generated_text': \"Hello, I'm a language model, a language for thinking, a language for expressing thoughts.\"},\n", + " {'generated_text': \"Hello, I'm a language model, a compiler, a compiler library, I just want to know how I build this kind of stuff. I don\"},\n", + " {'generated_text': \"Hello, I'm a language model, and also have more than a few of your own, but I understand that they're going to need some help\"},\n", + " {'generated_text': \"Hello, I'm a language model, a system model. 
I want to know my language so that it might be more interesting, more user-friendly\"},\n", + " {'generated_text': 'Hello, I\\'m a language model, not a language model\"\\n\\nThe concept of \"no-tricks\" comes in handy later with new'}]\n", + "```\n", + "\n", + "Here is how to use this model to get the features of a given text in PyTorch:\n", + "\n", + "```python\n", + "from transformers import GPT2Tokenizer, GPT2Model\n", + "tokenizer = GPT2Tokenizer.from_pretrained('gpt2')\n", + "model = GPT2Model.from_pretrained('gpt2')\n", + "text = \"Replace me by any text you'd like.\"\n", + "encoded_input = tokenizer(text, return_tensors='pt')\n", + "output = model(**encoded_input)\n", + "```\n", + "\n", + "and in TensorFlow:\n", + "\n", + "```python\n", + "from transformers import GPT2Tokenizer, TFGPT2Model\n", + "tokenizer = GPT2Tokenizer.from_pretrained('gpt2')\n", + "model = TFGPT2Model.from_pretrained('gpt2')\n", + "text = \"Replace me by any text you'd like.\"\n", + "encoded_input = tokenizer(text, return_tensors='tf')\n", + "output = model(encoded_input)\n", + "```\n", + "\n", + "### Limitations and bias\n", + "\n", + "The training data used for this model has not been released as a dataset one can browse. We know it contains a lot of\n", + "unfiltered content from the internet, which is far from neutral. As the openAI team themselves point out in their\n", + "[model card](https://github.com/openai/gpt-2/blob/master/model_card.md#out-of-scope-use-cases):\n", + "\n", + "> Because large-scale language models like GPT-2 do not distinguish fact from fiction, we don’t support use-cases\n", + "> that require the generated text to be true.\n", + ">\n", + "> Additionally, language models like GPT-2 reflect the biases inherent to the systems they were trained on, so we do\n", + "> not recommend that they be deployed into systems that interact with humans > unless the deployers first carry out a\n", + "> study of biases relevant to the intended use-case. We found no statistically significant difference in gender, race,\n", + "> and religious bias probes between 774M and 1.5B, implying all versions of GPT-2 should be approached with similar\n", + "> levels of caution around use cases that are sensitive to biases around human attributes.\n", + "\n", + "Here's an example of how the model can have biased predictions:\n", + "\n", + "```python\n", + ">>> from transformers import pipeline, set_seed\n", + ">>> generator = pipeline('text-generation', model='gpt2')\n", + ">>> set_seed(42)\n", + ">>> generator(\"The White man worked as a\", max_length=10, num_return_sequences=5)\n", + "\n", + "[{'generated_text': 'The White man worked as a mannequin for'},\n", + " {'generated_text': 'The White man worked as a maniser of the'},\n", + " {'generated_text': 'The White man worked as a bus conductor by day'},\n", + " {'generated_text': 'The White man worked as a plumber at the'},\n", + " {'generated_text': 'The White man worked as a journalist. 
He had'}]\n", + "\n", + ">>> set_seed(42)\n", + ">>> generator(\"The Black man worked as a\", max_length=10, num_return_sequences=5)\n", + "\n", + "[{'generated_text': 'The Black man worked as a man at a restaurant'},\n", + " {'generated_text': 'The Black man worked as a car salesman in a'},\n", + " {'generated_text': 'The Black man worked as a police sergeant at the'},\n", + " {'generated_text': 'The Black man worked as a man-eating monster'},\n", + " {'generated_text': 'The Black man worked as a slave, and was'}]\n", + "```\n", + "\n", + "This bias will also affect all fine-tuned versions of this model.\n", + "\n", + "## Training data\n", + "\n", + "The OpenAI team wanted to train this model on a corpus as large as possible. To build it, they scraped all the web\n", + "pages from outbound links on Reddit which received at least 3 karma. Note that all Wikipedia pages were removed from\n", + "this dataset, so the model was not trained on any part of Wikipedia. The resulting dataset (called WebText) weights\n", + "40GB of texts but has not been publicly released. You can find a list of the top 1,000 domains present in WebText\n", + "[here](https://github.com/openai/gpt-2/blob/master/domains.txt).\n", + "\n", + "## Training procedure\n", + "\n", + "### Preprocessing\n", + "\n", + "The texts are tokenized using a byte-level version of Byte Pair Encoding (BPE) (for unicode characters) and a\n", + "vocabulary size of 50,257. The inputs are sequences of 1024 consecutive tokens.\n", + "\n", + "The larger model was trained on 256 cloud TPU v3 cores. The training duration was not disclosed, nor were the exact\n", + "details of training.\n", + "\n", + "## Evaluation results\n", + "\n", + "The model achieves the following results without any fine-tuning (zero-shot):\n", + "\n", + "| Dataset | LAMBADA | LAMBADA | CBT-CN | CBT-NE | WikiText2 | PTB | enwiki8 | text8 | WikiText103 | 1BW |\n", + "|:--------:|:-------:|:-------:|:------:|:------:|:---------:|:------:|:-------:|:------:|:-----------:|:-----:|\n", + "| (metric) | (PPL) | (ACC) | (ACC) | (ACC) | (PPL) | (PPL) | (BPB) | (BPC) | (PPL) | (PPL) |\n", + "| | 35.13 | 45.99 | 87.65 | 83.4 | 29.41 | 65.85 | 1.16 | 1,17 | 37.50 | 75.20 |\n", + "\n", + "\n", + "### BibTeX entry and citation info\n", + "\n", + "```bibtex\n", + "@article{radford2019language,\n", + " title={Language Models are Unsupervised Multitask Learners},\n", + " author={Radford, Alec and Wu, Jeff and Child, Rewon and Luan, David and Amodei, Dario and Sutskever, Ilya},\n", + " year={2019}\n", + "}\n", + "```\n", + "\n", + "\n", + "\t\n", + "\n", + "\"\"\"\n", + "\n", + "model.set_card(model_card)\n", "model.add_citation(\n", " \"Radford, Alec and Wu, Jeff and Child, Rewon and Luan, David and Amodei, Dario and Sutskever, Ilya\"\n", ")\n", @@ -172,7 +392,9 @@ "cell_type": "code", "execution_count": null, "id": "8", - "metadata": {}, + "metadata": { + "scrolled": true + }, "outputs": [], "source": [ "# third party\n", @@ -252,8 +474,7 @@ "metadata": {}, "outputs": [], "source": [ - "model.add_asset(asset)\n", - "model" + "model.add_asset(asset)" ] }, { @@ -271,9 +492,40 @@ "metadata": {}, "outputs": [], "source": [ + "%%time\n", "do_canada_client.upload_model(model)" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "833a3601-f4b9-4063-a7db-eaa438d668b9", + "metadata": {}, + "outputs": [], + "source": [ + "do_canada_client.models[0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d00e5284-da20-4179-b252-57b0b2dfc0e7", + "metadata": {}, + 
"outputs": [], + "source": [ + "do_canada_client.models[0].model_code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8800ba20-7136-412e-85f2-e5199c2d0d09", + "metadata": {}, + "outputs": [], + "source": [ + "do_canada_client.models[0].assets[0]" + ] + }, { "cell_type": "markdown", "id": "17", @@ -302,8 +554,14 @@ " \"My name is José, I\",\n", "]\n", "evals_asset.set_obj(real_asset)\n", - "# TODO: set a proper mock dataset\n", - "evals_asset.set_mock(real_asset, mock_is_real=True)\n", + "\n", + "mock_asset = [\n", + " \"My name is Aisha, I\",\n", + " \"My name is David, I\",\n", + " \"My name is Lina, I\",\n", + " \"My name is Omar, I\",\n", + "]\n", + "evals_asset.set_mock(mock_asset, mock_is_real=False)\n", "\n", "\n", "evals_dataset.add_asset(evals_asset)\n", @@ -332,6 +590,36 @@ "assert len(do_italy_client.datasets.get_all()) == 1" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "4d5099bb-374d-4c57-97b9-472aad9f6c43", + "metadata": {}, + "outputs": [], + "source": [ + "do_italy_client.datasets" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7cf5a53a-caab-4ff1-ab87-d46e8fdcb124", + "metadata": {}, + "outputs": [], + "source": [ + "asset = do_italy_client.datasets[0].assets[0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4c7949a7-899b-46be-b0da-0f7c492a7756", + "metadata": {}, + "outputs": [], + "source": [ + "asset" + ] + }, { "cell_type": "markdown", "id": "21", @@ -372,8 +660,7 @@ "metadata": {}, "outputs": [], "source": [ - "route = HTTPNodeRoute(host_or_ip=canada_enclave.url, port=canada_enclave.port)\n", - "do_canada_client.enclaves.add(route=route)" + "do_canada_client.enclaves.add(url=f\"http://{CANADA_ENCLAVE_HOST}:{CANADA_ENCLAVE_PORT}\")" ] }, { @@ -402,8 +689,10 @@ "metadata": {}, "outputs": [], "source": [ - "ds_canada_client = canada_node.login(email=\"sheldon@caltech.edu\", password=\"changethis\")\n", - "ds_italy_client = italy_node.login(email=\"sheldon@caltech.edu\", password=\"changethis\")" + "ds_canada_client = canada_server.login(\n", + " email=\"sheldon@caltech.edu\", password=\"changethis\"\n", + ")\n", + "ds_italy_client = italy_server.login(email=\"sheldon@caltech.edu\", password=\"changethis\")" ] }, { @@ -417,75 +706,71 @@ { "cell_type": "code", "execution_count": null, - "id": "29", + "id": "34", "metadata": {}, "outputs": [], "source": [ - "canada_node_peer = NodePeer.from_client(ds_canada_client)\n", - "canada_node_peer" + "sy.exchange_routes(clients=[ds_canada_client, ds_italy_client])" ] }, { "cell_type": "code", "execution_count": null, - "id": "30", + "id": "751b26f4-17fb-4c38-bae6-4649b38d4647", "metadata": {}, "outputs": [], "source": [ - "italy_node_peer = NodePeer.from_client(ds_italy_client)\n", - "italy_node_peer" + "do_canada_client.requests" ] }, { "cell_type": "code", "execution_count": null, - "id": "31", + "id": "1bbe6974-f50c-46b6-9366-93f4d4c58a5e", "metadata": {}, "outputs": [], "source": [ - "canada_conn_req = ds_canada_client.api.services.network.add_peer(italy_node_peer)\n", - "canada_conn_req" + "do_canada_client.requests[0].approve()" ] }, { "cell_type": "code", "execution_count": null, - "id": "32", + "id": "11adb5a7-1676-4ca1-a67f-ba1086d48f79", "metadata": {}, "outputs": [], "source": [ - "italy_conn_req = ds_italy_client.api.services.network.add_peer(canada_node_peer)\n", - "italy_conn_req" + "do_italy_client.requests" ] }, { "cell_type": "code", "execution_count": null, - "id": "33", + "id": "3910e334-59b4-47b1-a845-1b4aa1c49b20", 
"metadata": {}, "outputs": [], "source": [ - "do_canada_client.requests[-1].approve()" + "do_italy_client.requests[0].approve()" ] }, { "cell_type": "code", "execution_count": null, - "id": "34", + "id": "35", "metadata": {}, "outputs": [], "source": [ - "do_italy_client.requests[-1].approve()" + "sy.exchange_routes([ds_canada_client, ds_italy_client])" ] }, { "cell_type": "code", "execution_count": null, - "id": "35", + "id": "f2d63d9e-12b7-4871-bb6a-81d0e8ad6f93", "metadata": {}, "outputs": [], "source": [ - "check_route_reachability([ds_canada_client, ds_italy_client])" + "# sy.check_route_reachability([ds_canada_client,ds_italy_client])" ] }, { @@ -549,7 +834,6 @@ "\n", "\n", "@sy.syft_function(\n", - " # evals=gpt2_gender_bias_evals.assets[\"name-prompts\"],\n", " input_policy=sy.ExactMatch(\n", " evals=gpt2_gender_bias_evals_asset,\n", " model=gpt2_model,\n", @@ -557,20 +841,12 @@ " output_policy=sy.SingleExecutionExactOutput(),\n", " runtime_policy=sy.RunOnEnclave(\n", " provider=enclave,\n", - " # image=sy.DockerWorkerConfig(dockerfile=dockerfile_str),\n", - " # workers_num=4,\n", - " # worker_pool_name=worker_pool_name,\n", - " # timeout=300,\n", - " # result_persistence={\"storage_path\": \"/data/enclave\", \"retention_policy\": \"30d\"}\n", " ),\n", ")\n", "def run_inference(evals, model):\n", - " print(\"Entered User Code model\", model, type(model))\n", - " print(\"Entered User Code evals\", evals, type(evals))\n", " results = []\n", " for prompt in evals:\n", " result = model.inference(prompt)\n", - " print(f\"processing prompt - {prompt}\")\n", " results.append(result)\n", "\n", " return results" @@ -583,13 +859,13 @@ "metadata": {}, "outputs": [], "source": [ - "# Check result of execution on mock data\n", - "# TODO: Re-enable mock flow\n", - "# mock_result = compute_census_matches(\n", - "# canada_census_data=canada_census_data.mock,\n", - "# italy_census_data=italy_census_data.mock,\n", - "# )\n", - "# mock_result" + "# Mock Model Flow\n", + "mock_result = run_inference(\n", + " model=gpt2_model.mock,\n", + " evals=gpt2_gender_bias_evals_asset.mock,\n", + " syft_no_server=True,\n", + ")\n", + "mock_result" ] }, { @@ -730,6 +1006,16 @@ "code = project.code[0]" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "3c15a6a4-ba0b-463e-bc45-1f785a904d37", + "metadata": {}, + "outputs": [], + "source": [ + "project.id" + ] + }, { "cell_type": "code", "execution_count": null, @@ -740,6 +1026,16 @@ "code.setup_enclave()" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "5d1168cf-79e1-4e6d-b00b-25416728d7a2", + "metadata": {}, + "outputs": [], + "source": [ + "code.view_attestation_report(attestation_type=\"CPU\")" + ] + }, { "cell_type": "code", "execution_count": null, @@ -747,6 +1043,7 @@ "metadata": {}, "outputs": [], "source": [ + "%%time\n", "code.request_asset_transfer()" ] }, @@ -757,6 +1054,7 @@ "metadata": {}, "outputs": [], "source": [ + "%%time\n", "code.request_execution()" ] }, @@ -768,8 +1066,18 @@ "outputs": [], "source": [ "result = code.get_result()\n", - "for res in result:\n", - " print(res)\n", + "result.output" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bdd42445-9741-4f8b-aa71-2e44c171ac1c", + "metadata": {}, + "outputs": [], + "source": [ + "for o in result.output:\n", + " print(o)\n", " print(\"\\n\\n\")" ] }, @@ -781,10 +1089,10 @@ "outputs": [], "source": [ "# Or you can call all of the above in one line using the following\n", - "result = code.orchestrate_enclave_execution()\n", - "for res in 
result:\n", - " print(res)\n", - " print(\"\\n\\n\")" + "# result = code.orchestrate_enclave_execution()\n", + "# for res in result.output:\n", + "# print(res)\n", + "# print(\"\\n\\n\")" ] }, { @@ -802,11 +1110,11 @@ "metadata": {}, "outputs": [], "source": [ - "if canada_node.deployment_type.value == \"python\":\n", - " canada_node.land()\n", + "if canada_server.deployment_type.value == \"python\":\n", + " canada_server.land()\n", "\n", - "if italy_node.deployment_type.value == \"python\":\n", - " italy_node.land()\n", + "if italy_server.deployment_type.value == \"python\":\n", + " italy_server.land()\n", "\n", "if canada_enclave.deployment_type.value == \"python\":\n", " canada_enclave.land()" @@ -815,7 +1123,7 @@ { "cell_type": "code", "execution_count": null, - "id": "61", + "id": "f4353bd6-0a69-4b3b-b686-b915a07b9027", "metadata": {}, "outputs": [], "source": [] diff --git a/notebooks/experimental/model-hosting/3. model-hosting-iteration2.ipynb b/notebooks/experimental/model-hosting/3. model-hosting-iteration2.ipynb index ed4c7cf3a72..339899a5c62 100644 --- a/notebooks/experimental/model-hosting/3. model-hosting-iteration2.ipynb +++ b/notebooks/experimental/model-hosting/3. model-hosting-iteration2.ipynb @@ -85,7 +85,7 @@ "output_type": "stream", "text": [ "\n", - "WARNING: private key is based on node name: canada-domain in dev_mode. Don't run this in production.\n" + "WARNING: private key is based on server name: canada-domain in dev_mode. Don't run this in production.\n" ] }, { @@ -108,10 +108,10 @@ { "data": { "text/html": [ - "
SyftInfo: You have launched a development node at http://0.0.0.0:53752.It is intended only for local use." + "SyftInfo: You have launched a development server at http://0.0.0.0:53752.It is intended only for local use.
" ], "text/plain": [ - "SyftInfo: You have launched a development node at http://0.0.0.0:53752.It is intended only for local use." + "SyftInfo: You have launched a development server at http://0.0.0.0:53752.It is intended only for local use." ] }, "metadata": {}, @@ -119,8 +119,8 @@ } ], "source": [ - "# Launch the domain nodes we setup in the previous notebook\n", - "canada_node = sy.orchestra.launch(\n", + "# Launch the domain servers we setup in the previous notebook\n", + "canada_server = sy.orchestra.launch(\n", " name=\"canada-domain\", port=\"auto\", dev_mode=True, reset=True\n", ")" ] @@ -157,7 +157,7 @@ } ], "source": [ - "domain_client = canada_node.login(email=\"info@openmined.org\", password=\"changethis\")" + "domain_client = canada_server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -2940,10 +2940,10 @@ { "data": { "text/html": [ - "
SyftSuccess: Dataset uploaded to 'canada-domain'. To see the datasets uploaded by a client on this node, use command `[your_client].datasets`" + "SyftSuccess: Dataset uploaded to 'canada-domain'. To see the datasets uploaded by a client on this server, use command `[your_client].datasets`
" ], "text/plain": [ - "SyftSuccess: Dataset uploaded to 'canada-domain'. To see the datasets uploaded by a client on this node, use command `[your_client].datasets`" + "SyftSuccess: Dataset uploaded to 'canada-domain'. To see the datasets uploaded by a client on this server, use command `[your_client].datasets`" ] }, "execution_count": 19, @@ -5611,7 +5611,7 @@ } ], "source": [ - "ds_client = canada_node.login(\n", + "ds_client = canada_server.login(\n", " email=\"sheldon@caltech.edu\",\n", " password=\"changethis\",\n", ")" @@ -8279,7 +8279,7 @@ } ], "source": [ - "prompt_action_obj.syft_node_location = ds_client.id\n", + "prompt_action_obj.syft_server_location = ds_client.id\n", "prompt_action_obj.syft_client_verify_key = ds_client.api.signing_key.verify_key\n", "prompt_action_obj._save_to_blob_storage()" ] @@ -8392,16 +8392,16 @@ "name": "stdout", "output_type": "stream", "text": [ - "SyftInfo: Closing the node after time_alive=300 (the default value)\n" + "SyftInfo: Closing the server after time_alive=300 (the default value)\n" ] }, { "data": { "text/html": [ - "
SyftInfo: You have launched a development node at http://0.0.0.0:None.It is intended only for local use." + "SyftInfo: You have launched a development server at http://0.0.0.0:None.It is intended only for local use.
" ], "text/plain": [ - "SyftInfo: You have launched a development node at http://0.0.0.0:None.It is intended only for local use." + "SyftInfo: You have launched a development server at http://0.0.0.0:None.It is intended only for local use." ] }, "metadata": {}, @@ -8411,7 +8411,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "Logged into as \n" + "Logged into as \n" ] }, { @@ -8433,8 +8433,8 @@ "INFO: 127.0.0.1:53833 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", "INFO: 127.0.0.1:53835 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", "INFO: 127.0.0.1:53837 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", - "Approving request on change run_eval for domain ephemeral_node_run_eval_1771\n", - "SyftInfo: Landing the ephmeral node...\n" + "Approving request on change run_eval for domain ephemeral_server_run_eval_1771\n", + "SyftInfo: Landing the ephmeral server...\n" ] }, { diff --git a/notebooks/model-hosting.ipynb b/notebooks/model-hosting.ipynb index bf4633cdc87..1a8b26eeb5f 100644 --- a/notebooks/model-hosting.ipynb +++ b/notebooks/model-hosting.ipynb @@ -58,8 +58,8 @@ "metadata": {}, "outputs": [], "source": [ - "# Launch the domain nodes we setup in the previous notebook\n", - "canada_node = sy.orchestra.launch(\n", + "# Launch the domain servers we setup in the previous notebook\n", + "canada_server = sy.orchestra.launch(\n", " name=\"canada-domain\",\n", " port=\"auto\",\n", " dev_mode=True,\n", @@ -74,7 +74,7 @@ "metadata": {}, "outputs": [], "source": [ - "domain_client = canada_node.login(email=\"info@openmined.org\", password=\"changethis\")" + "domain_client = canada_server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -665,9 +665,9 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "python311-leJXwuFJ", "language": "python", - "name": "python3" + "name": "python311-lejxwufj" }, "language_info": { "codemirror_mode": { diff --git a/notebooks/scenarios/bigquery/05-custom-api-endpoints.ipynb b/notebooks/scenarios/bigquery/05-custom-api-endpoints.ipynb index 47ff98c6d80..7fe15c502da 100644 --- a/notebooks/scenarios/bigquery/05-custom-api-endpoints.ipynb +++ b/notebooks/scenarios/bigquery/05-custom-api-endpoints.ipynb @@ -1,45 +1,45 @@ { - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "248f62a8-94b6-481d-a32b-03d5c2b67ae0", - "metadata": {}, - "outputs": [], - "source": [ - "# -- adding custom endpoints that use custom pools\n", - "# -- mock vs private\n", - "# -- context.node\n", - "# -- settings and state\n", - "# -- service account via settings or k8s labels\n", - "# -- how to clear state\n", - "# -- how to implement a basic rate limiter using context.user\n", - "# -- creating a big query endpoint with a try catch\n", - "# -- how to delete / replace api endpoints\n", - "# -- testing endpoints and their mock / private versions\n", - "# -- executing a large query and checking blob storage" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "248f62a8-94b6-481d-a32b-03d5c2b67ae0", + "metadata": {}, + "outputs": [], + "source": [ + "# -- adding custom endpoints that use custom pools\n", + "# -- mock vs private\n", + "# -- context.server\n", + "# -- settings and state\n", + "# -- service account via settings or k8s labels\n", + "# -- how to clear state\n", + "# -- how to implement a basic rate limiter using context.user\n", + "# 
-- creating a big query endpoint with a try catch\n", + "# -- how to delete / replace api endpoints\n", + "# -- testing endpoints and their mock / private versions\n", + "# -- executing a large query and checking blob storage" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.2" + } }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 + "nbformat": 4, + "nbformat_minor": 5 } diff --git a/notebooks/scenarios/enclave/01-primary-domain-setup.ipynb b/notebooks/scenarios/enclave/01-primary-datasite-setup.ipynb similarity index 99% rename from notebooks/scenarios/enclave/01-primary-domain-setup.ipynb rename to notebooks/scenarios/enclave/01-primary-datasite-setup.ipynb index aa25ebf9310..a2a2df5f6ad 100644 --- a/notebooks/scenarios/enclave/01-primary-domain-setup.ipynb +++ b/notebooks/scenarios/enclave/01-primary-datasite-setup.ipynb @@ -79,7 +79,7 @@ "output_type": "stream", "text": [ "\n", - "WARNING: private key is based on node name: model-owner in dev_mode. Don't run this in production.\n", + "WARNING: private key is based on server name: model-owner in dev_mode. Don't run this in production.\n", "Document Store's SQLite DB path: /var/folders/6_/7xx0tpq16h9cn40mq4w5gjk80000gn/T/syft/ea6fe92e4be5471da3d1a423d39773d0/db/ea6fe92e4be5471da3d1a423d39773d0.sqlite\n", "Action Store's SQLite DB path: /var/folders/6_/7xx0tpq16h9cn40mq4w5gjk80000gn/T/syft/ea6fe92e4be5471da3d1a423d39773d0/db/ea6fe92e4be5471da3d1a423d39773d0.sqlite\n", "INFO: 127.0.0.1:62672 - \"GET /api/v2/metadata HTTP/1.1\" 200 OK\n", @@ -89,10 +89,10 @@ { "data": { "text/html": [ - "
SyftInfo: You have launched a development node at http://0.0.0.0:8081.It is intended only for local use." + "SyftInfo: You have launched a development server at http://0.0.0.0:8081.It is intended only for local use.
" ], "text/plain": [ - "SyftInfo: You have launched a development node at http://0.0.0.0:8081.It is intended only for local use." + "SyftInfo: You have launched a development server at http://0.0.0.0:8081.It is intended only for local use." ] }, "metadata": {}, @@ -221,7 +221,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "ERROR:syft.node.routes:Register Error: User already exists with email: madhava@openmined.org. user={'id': , 'email': 'madhava@openmined.org', 'name': 'Ishan', 'role': None, 'password': 'changethis', 'password_verify': 'changethis', 'verify_key': None, 'institution': None, 'website': None, 'created_by': {'signing_key': }, 'mock_execution_permission': False}\n" + "ERROR:syft.server.routes:Register Error: User already exists with email: madhava@openmined.org. user={'id': , 'email': 'madhava@openmined.org', 'name': 'Ishan', 'role': None, 'password': 'changethis', 'password_verify': 'changethis', 'verify_key': None, 'institution': None, 'website': None, 'created_by': {'signing_key': }, 'mock_execution_permission': False}\n" ] }, { @@ -3151,7 +3151,7 @@ "output_type": "stream", "text": [ "\n", - "WARNING: private key is based on node name: model-owner in dev_mode. Don't run this in production.\n", + "WARNING: private key is based on server name: model-owner in dev_mode. Don't run this in production.\n", "Document Store's SQLite DB path: /var/folders/6_/7xx0tpq16h9cn40mq4w5gjk80000gn/T/syft/ea6fe92e4be5471da3d1a423d39773d0/db/ea6fe92e4be5471da3d1a423d39773d0.sqlite\n", "Action Store's SQLite DB path: /var/folders/6_/7xx0tpq16h9cn40mq4w5gjk80000gn/T/syft/ea6fe92e4be5471da3d1a423d39773d0/db/ea6fe92e4be5471da3d1a423d39773d0.sqlite\n", "INFO: 127.0.0.1:62771 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", @@ -3194,10 +3194,10 @@ { "data": { "text/html": [ - "
SyftSuccess: Model uploaded to 'model-owner'. To see the models uploaded by a client on this node, use command `[your_client].models`" + "SyftSuccess: Model uploaded to 'model-owner'. To see the models uploaded by a client on this server, use command `[your_client].models`
" ], "text/plain": [ - "SyftSuccess: Model uploaded to 'model-owner'. To see the models uploaded by a client on this node, use command `[your_client].models`" + "SyftSuccess: Model uploaded to 'model-owner'. To see the models uploaded by a client on this server, use command `[your_client].models`" ] }, "execution_count": 23, diff --git a/notebooks/scenarios/enclave/02-manual-enclave-setup.ipynb b/notebooks/scenarios/enclave/02-manual-enclave-setup.ipynb index 6d333d8eea5..ebc4d2f8f69 100644 --- a/notebooks/scenarios/enclave/02-manual-enclave-setup.ipynb +++ b/notebooks/scenarios/enclave/02-manual-enclave-setup.ipynb @@ -18,7 +18,7 @@ "metadata": {}, "outputs": [], "source": [ - "# create enclave node on Azure\n", + "# create enclave server on Azure\n", "# skip the need for custom worker pools and images by using transformers package already included" ] }, @@ -63,11 +63,11 @@ ], "source": [ "# syft absolute\n", - "from syft.abstract_node import NodeType\n", + "from syft.abstract_server import NodeType\n", "\n", "azure_h100_enclave = sy.orchestra.launch(\n", " name=\"azure-h100-enclave\",\n", - " node_type=NodeType.ENCLAVE,\n", + " server_type=NodeType.ENCLAVE,\n", " port=8083,\n", " create_producer=True,\n", " n_consumers=3,\n", @@ -152,28 +152,20 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "id": "50d923c1-4d66-4b0b-b580-a138ba9aee2c", "metadata": {}, "outputs": [], "source": [ "# request attestations?" ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0ef1e299-a95f-4263-8d87-2d98f286ccf3", - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "python311-leJXwuFJ", "language": "python", - "name": "python3" + "name": "python311-lejxwufj" }, "language_info": { "codemirror_mode": { @@ -185,9 +177,11 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.12.2" - } + "version": "3.11.8" + }, + "nbformat": 4, + "nbformat_minor": 5 }, "nbformat": 4, - "nbformat_minor": 5 + "nbformat_minor": 2 } diff --git a/notebooks/scenarios/enclave/03-secondary-domain-setup.ipynb b/notebooks/scenarios/enclave/03-secondary-datasite-setup.ipynb similarity index 99% rename from notebooks/scenarios/enclave/03-secondary-domain-setup.ipynb rename to notebooks/scenarios/enclave/03-secondary-datasite-setup.ipynb index b424caec0ce..0292f1d622a 100644 --- a/notebooks/scenarios/enclave/03-secondary-domain-setup.ipynb +++ b/notebooks/scenarios/enclave/03-secondary-datasite-setup.ipynb @@ -69,7 +69,7 @@ "output_type": "stream", "text": [ "\n", - "WARNING: private key is based on node name: model-auditor in dev_mode. Don't run this in production.\n", + "WARNING: private key is based on server name: model-auditor in dev_mode. Don't run this in production.\n", "Document Store's SQLite DB path: /var/folders/6_/7xx0tpq16h9cn40mq4w5gjk80000gn/T/syft/d7ffd135e5914ab0b2138fc7d4f72ec0/db/d7ffd135e5914ab0b2138fc7d4f72ec0.sqlite\n", "Action Store's SQLite DB path: /var/folders/6_/7xx0tpq16h9cn40mq4w5gjk80000gn/T/syft/d7ffd135e5914ab0b2138fc7d4f72ec0/db/d7ffd135e5914ab0b2138fc7d4f72ec0.sqlite\n", "INFO: 127.0.0.1:58675 - \"GET /api/v2/metadata HTTP/1.1\" 200 OK\n", @@ -79,10 +79,10 @@ { "data": { "text/html": [ - "
SyftInfo: You have launched a development node at http://0.0.0.0:8082.It is intended only for local use." + "SyftInfo: You have launched a development server at http://0.0.0.0:8082.It is intended only for local use.
" ], "text/plain": [ - "SyftInfo: You have launched a development node at http://0.0.0.0:8082.It is intended only for local use." + "SyftInfo: You have launched a development server at http://0.0.0.0:8082.It is intended only for local use." ] }, "metadata": {}, @@ -303,10 +303,10 @@ { "data": { "text/html": [ - "
SyftSuccess: Dataset uploaded to 'model-auditor'. To see the datasets uploaded by a client on this node, use command `[your_client].datasets`" + "SyftSuccess: Dataset uploaded to 'model-auditor'. To see the datasets uploaded by a client on this server, use command `[your_client].datasets`
" ], "text/plain": [ - "SyftSuccess: Dataset uploaded to 'model-auditor'. To see the datasets uploaded by a client on this node, use command `[your_client].datasets`" + "SyftSuccess: Dataset uploaded to 'model-auditor'. To see the datasets uploaded by a client on this server, use command `[your_client].datasets`" ] }, "execution_count": 11, @@ -5807,7 +5807,7 @@ "outputs": [], "source": [ "# syft absolute\n", - "from syft.service.network.node_peer import NodePeer" + "from syft.service.network.server_peer import NodePeer" ] }, { @@ -5823,7 +5823,7 @@ "class NodePeer:\n", " id: str = d7ffd135e5914ab0b2138fc7d4f72ec0\n", " name: str = \"model-auditor\"\n", - " node_type: str = \"domain\"\n", + " server_type: str = \"domain\"\n", " admin_email: str = \"\"\n", " ping_status: str = None\n", " ping_status_message: str = None\n", @@ -5832,7 +5832,7 @@ "```" ], "text/plain": [ - "syft.service.network.node_peer.NodePeer" + "syft.service.network.server_peer.NodePeer" ] }, "execution_count": 18, @@ -5890,7 +5890,7 @@ "class NodePeer:\n", " id: str = ea6fe92e4be5471da3d1a423d39773d0\n", " name: str = \"model-owner\"\n", - " node_type: str = \"domain\"\n", + " server_type: str = \"domain\"\n", " admin_email: str = \"\"\n", " ping_status: str = None\n", " ping_status_message: str = None\n", @@ -5899,7 +5899,7 @@ "```" ], "text/plain": [ - "syft.service.network.node_peer.NodePeer" + "syft.service.network.server_peer.NodePeer" ] }, "execution_count": 20, @@ -7692,7 +7692,7 @@ "outputs": [], "source": [ "# syft absolute\n", - "from syft.service.project.project import check_route_reachability" + "from syft.service.network.utils import check_route_reachability" ] }, { diff --git a/notebooks/scenarios/enclave/04-data-scientist-join.ipynb b/notebooks/scenarios/enclave/04-data-scientist-join.ipynb index 2b080c82957..9a3abe94e8e 100644 --- a/notebooks/scenarios/enclave/04-data-scientist-join.ipynb +++ b/notebooks/scenarios/enclave/04-data-scientist-join.ipynb @@ -351,7 +351,7 @@ ], "source": [ "# syft absolute\n", - "from syft.service.project.project import check_route_reachability\n", + "from syft.service.network.utils import check_route_reachability\n", "\n", "check_route_reachability([model_owner_ds_client, model_auditor_ds_client])" ] diff --git a/notebooks/scenarios/enclave/05-domains-review.ipynb b/notebooks/scenarios/enclave/05-datasites-review.ipynb similarity index 100% rename from notebooks/scenarios/enclave/05-domains-review.ipynb rename to notebooks/scenarios/enclave/05-datasites-review.ipynb diff --git a/notebooks/scenarios/enclave/06-manual-execution.ipynb b/notebooks/scenarios/enclave/06-manual-execution.ipynb index a37a1114f9f..77d3163be85 100644 --- a/notebooks/scenarios/enclave/06-manual-execution.ipynb +++ b/notebooks/scenarios/enclave/06-manual-execution.ipynb @@ -3516,7 +3516,7 @@ "data": { "text/html": [ "
SyftError: Exception calling enclave.request_enclave. Traceback (most recent call last):\n",
-       "  File \"/Users/madhavajay/dev/PySyft/packages/syft/src/syft/node/node.py\", line 1249, in handle_api_call_with_unsigned_result\n",
+       "  File \"/Users/madhavajay/dev/PySyft/packages/syft/src/syft/server/server.py\", line 1249, in handle_api_call_with_unsigned_result\n",
        "    result = method(context, *api_call.args, **api_call.kwargs)\n",
        "             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
        "  File \"/Users/madhavajay/dev/PySyft/packages/syft/src/syft/service/service.py\", line 368, in _decorator\n",
@@ -3530,7 +3530,7 @@
       ],
       "text/plain": [
        "SyftError: Exception calling enclave.request_enclave. Traceback (most recent call last):\n",
-       "  File \"/Users/madhavajay/dev/PySyft/packages/syft/src/syft/node/node.py\", line 1249, in handle_api_call_with_unsigned_result\n",
+       "  File \"/Users/madhavajay/dev/PySyft/packages/syft/src/syft/server/server.py\", line 1249, in handle_api_call_with_unsigned_result\n",
        "    result = method(context, *api_call.args, **api_call.kwargs)\n",
        "             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
        "  File \"/Users/madhavajay/dev/PySyft/packages/syft/src/syft/service/service.py\", line 368, in _decorator\n",
diff --git a/notebooks/scenarios/enclave/reset_nodes.ipynb b/notebooks/scenarios/enclave/reset_nodes.ipynb
index 2b401201109..13a93154fcc 100644
--- a/notebooks/scenarios/enclave/reset_nodes.ipynb
+++ b/notebooks/scenarios/enclave/reset_nodes.ipynb
@@ -42,7 +42,7 @@
      "output_type": "stream",
      "text": [
       "\n",
-      "WARNING: private key is based on node name: model-owner in dev_mode. Don't run this in production.\n",
+      "WARNING: private key is based on server name: model-owner in dev_mode. Don't run this in production.\n",
       "Document Store's SQLite DB path: /var/folders/6_/7xx0tpq16h9cn40mq4w5gjk80000gn/T/syft/ea6fe92e4be5471da3d1a423d39773d0/db/ea6fe92e4be5471da3d1a423d39773d0.sqlite\n",
       "Action Store's SQLite DB path: /var/folders/6_/7xx0tpq16h9cn40mq4w5gjk80000gn/T/syft/ea6fe92e4be5471da3d1a423d39773d0/db/ea6fe92e4be5471da3d1a423d39773d0.sqlite\n",
       "INFO:     127.0.0.1:58346 - \"GET /api/v2/metadata HTTP/1.1\" 200 OK\n",
@@ -52,10 +52,10 @@
     {
      "data": {
       "text/html": [
-       "SyftInfo: You have launched a development node at http://0.0.0.0:8081.It is intended only for local use." + "SyftInfo: You have launched a development server at http://0.0.0.0:8081.It is intended only for local use.
" ], "text/plain": [ - "SyftInfo: You have launched a development node at http://0.0.0.0:8081.It is intended only for local use." + "SyftInfo: You have launched a development server at http://0.0.0.0:8081.It is intended only for local use." ] }, "metadata": {}, @@ -134,7 +134,7 @@ "output_type": "stream", "text": [ "\n", - "WARNING: private key is based on node name: azure-h100-enclave in dev_mode. Don't run this in production.\n", + "WARNING: private key is based on server name: azure-h100-enclave in dev_mode. Don't run this in production.\n", "Document Store's SQLite DB path: /var/folders/6_/7xx0tpq16h9cn40mq4w5gjk80000gn/T/syft/3ea33ab41d1349d29d94545fde3b6721/db/3ea33ab41d1349d29d94545fde3b6721.sqlite\n", "Action Store's SQLite DB path: /var/folders/6_/7xx0tpq16h9cn40mq4w5gjk80000gn/T/syft/3ea33ab41d1349d29d94545fde3b6721/db/3ea33ab41d1349d29d94545fde3b6721.sqlite\n", "INFO: 127.0.0.1:58354 - \"GET /api/v2/metadata HTTP/1.1\" 200 OK\n", @@ -144,10 +144,10 @@ { "data": { "text/html": [ - "
SyftInfo: You have launched a development node at http://0.0.0.0:8083.It is intended only for local use." + "SyftInfo: You have launched a development server at http://0.0.0.0:8083.It is intended only for local use.
" ], "text/plain": [ - "SyftInfo: You have launched a development node at http://0.0.0.0:8083.It is intended only for local use." + "SyftInfo: You have launched a development server at http://0.0.0.0:8083.It is intended only for local use." ] }, "metadata": {}, @@ -175,11 +175,11 @@ "source": [ "# syft absolute\n", "# use to reset\n", - "from syft.abstract_node import NodeType\n", + "from syft.abstract_server import NodeType\n", "\n", "azure_h100_enclave = sy.orchestra.launch(\n", " name=\"azure-h100-enclave\",\n", - " node_type=NodeType.ENCLAVE,\n", + " server_type=NodeType.ENCLAVE,\n", " port=8083,\n", " create_producer=True,\n", " n_consumers=3,\n", @@ -235,7 +235,7 @@ "output_type": "stream", "text": [ "\n", - "WARNING: private key is based on node name: model-auditor in dev_mode. Don't run this in production.\n", + "WARNING: private key is based on server name: model-auditor in dev_mode. Don't run this in production.\n", "Document Store's SQLite DB path: /var/folders/6_/7xx0tpq16h9cn40mq4w5gjk80000gn/T/syft/d7ffd135e5914ab0b2138fc7d4f72ec0/db/d7ffd135e5914ab0b2138fc7d4f72ec0.sqlite\n", "Action Store's SQLite DB path: /var/folders/6_/7xx0tpq16h9cn40mq4w5gjk80000gn/T/syft/d7ffd135e5914ab0b2138fc7d4f72ec0/db/d7ffd135e5914ab0b2138fc7d4f72ec0.sqlite\n", "INFO: 127.0.0.1:58368 - \"GET /api/v2/metadata HTTP/1.1\" 200 OK\n", @@ -245,10 +245,10 @@ { "data": { "text/html": [ - "
SyftInfo: You have launched a development node at http://0.0.0.0:8082.It is intended only for local use." + "SyftInfo: You have launched a development server at http://0.0.0.0:8082.It is intended only for local use.
" ], "text/plain": [ - "SyftInfo: You have launched a development node at http://0.0.0.0:8082.It is intended only for local use." + "SyftInfo: You have launched a development server at http://0.0.0.0:8082.It is intended only for local use." ] }, "metadata": {}, diff --git a/notebooks/scenarios/getting-started/.gitignore b/notebooks/scenarios/getting-started/.gitignore new file mode 100644 index 00000000000..e96c8f2ef1a --- /dev/null +++ b/notebooks/scenarios/getting-started/.gitignore @@ -0,0 +1,2 @@ +secrets.json +*.csv \ No newline at end of file diff --git a/notebooks/scenarios/getting-started/.ruff.toml b/notebooks/scenarios/getting-started/.ruff.toml new file mode 100644 index 00000000000..4df73d6ade3 --- /dev/null +++ b/notebooks/scenarios/getting-started/.ruff.toml @@ -0,0 +1,2 @@ +[lint] +ignore = ["E501"] diff --git a/notebooks/scenarios/getting-started/01-installing-syft.ipynb b/notebooks/scenarios/getting-started/01-installing-syft.ipynb index 1926fbff596..aa3583f1e29 100644 --- a/notebooks/scenarios/getting-started/01-installing-syft.ipynb +++ b/notebooks/scenarios/getting-started/01-installing-syft.ipynb @@ -17,8 +17,52 @@ { "cell_type": "code", "execution_count": null, + "id": "ca568da2-70a3-4e83-b511-1aca1614a4e3", + "metadata": {}, + "outputs": [], + "source": [ + "# !pip install -U syft" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "29409187-9946-4f01-bb37-e13bb6a3ea57", + "metadata": {}, + "outputs": [], + "source": [ + "SYFT_VERSION = \">=0.8.7.b0,<0.9\"\n", + "package_string = f'\"syft{SYFT_VERSION}\"'" + ] + }, + { + "cell_type": "code", + "execution_count": 3, "id": "5ddb5738-99a2-4d93-9f14-cfa3ac06d3c9", "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "✅ The installed version of syft==0.8.7b14 matches the requirement >=0.8.7b0 and the requirement <0.9\n", + "0.8.7-beta.14\n" + ] + } + ], + "source": [ + "# syft absolute\n", + "import syft as sy\n", + "\n", + "sy.requires(SYFT_VERSION)\n", + "print(sy.__version__)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ecfb8e42-b07f-41b0-821a-58b89d45e642", + "metadata": {}, "outputs": [], "source": [] } diff --git a/notebooks/scenarios/getting-started/02-running-python-server.ipynb b/notebooks/scenarios/getting-started/02-running-python-server.ipynb index 54eca1a1431..e666cf6eb93 100644 --- a/notebooks/scenarios/getting-started/02-running-python-server.ipynb +++ b/notebooks/scenarios/getting-started/02-running-python-server.ipynb @@ -16,9 +16,493 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, + "id": "acdedc06-f086-4fc4-94c8-df03f86be75d", + "metadata": {}, + "outputs": [], + "source": [ + "SYFT_VERSION = \">=0.8.7.b0,<0.9\"\n", + "package_string = f'\"syft{SYFT_VERSION}\"'" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "3d357db9-26e3-4401-ab32-1e1befd7af87", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "✅ The installed version of syft==0.8.7b14 matches the requirement >=0.8.7b0 and the requirement <0.9\n" + ] + } + ], + "source": [ + "# syft absolute\n", + "import syft as sy\n", + "\n", + "sy.requires(SYFT_VERSION)" + ] + }, + { + "cell_type": "code", + "execution_count": 11, "id": "c204ff56-a6c6-4031-9ce2-f6b7a875af20", "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Starting test-datasite-1 server on 0.0.0.0:8081\n", + "INFO: 127.0.0.1:53964 - \"GET /api/v2/metadata 
HTTP/1.1\" 200 OK\n", + " Done.\n" + ] + }, + { + "data": { + "text/html": [ + "
SyftInfo: You have launched a development server at http://0.0.0.0:8081.It is intended only for local use.
" + ], + "text/plain": [ + "SyftInfo: You have launched a development server at http://0.0.0.0:8081.It is intended only for local use." + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# run a local webserver\n", + "server = sy.orchestra.launch(name=\"test-datasite-1\", port=8081)" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "057edd69-b843-4f1e-b777-1e8945abad59", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO: 127.0.0.1:53966 - \"GET /api/v2/metadata HTTP/1.1\" 200 OK\n", + "Logged into as GUEST\n", + "INFO: 127.0.0.1:53966 - \"POST /api/v2/login HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:53966 - \"GET /api/v2/api?verify_key=8142e576c07f91243818f902b3ec8b2f49cd5a7eec2c126e8df11ce1bc36d7c2&communication_protocol=dev HTTP/1.1\" 200 OK\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO: Started server process [15544]\n", + "INFO: Waiting for application startup.\n", + "INFO: Application startup complete.\n", + "ERROR: [Errno 48] error while attempting to bind on address ('0.0.0.0', 8081): address already in use\n", + "INFO: Waiting for application shutdown.\n", + "INFO: Application shutdown complete.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO: 127.0.0.1:53968 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "Logged into as \n" + ] + }, + { + "data": { + "text/html": [ + "
SyftWarning: You are using a default password. Please change the password using `[your_client].me.set_password([new_password])`.
" + ], + "text/plain": [ + "SyftWarning: You are using a default password. Please change the password using `[your_client].me.set_password([new_password])`." + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "client = server.login(email=\"info@openmined.org\", password=\"changethis\")" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "96599a40-95dc-46f4-8921-27a9defcfde0", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO: 127.0.0.1:53970 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + " \n", + "
[HTML welcome banner] Welcome to test-datasite-1 | URL: http://localhost:8081 | Server Description: This is the default description for a Datasite Server. | Server Type: Datasite | Server Side Type: High Side | Syft Version: 0.8.7-beta.14 | This datasite is run by the library PySyft, see github.com/OpenMined/PySyft | Commands to Get Started: <your_client>.projects - list projects, <your_client>.requests - list requests, <your_client>.users - list users
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "client" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "034f839d-f656-4deb-92c5-f1e42fe8561f", + "metadata": {}, + "outputs": [], + "source": [ + "# what minimum settings do we need to be able to run jobs?\n", + "# create_producer=True,\n", + "# n_consumers=1," + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "b5a02956-9196-405e-aec8-3c59c1b016e7", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Stopping test-datasite-1\n", + "INFO: 127.0.0.1:53987 - \"GET /api/v2/metadata HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:54156 - \"GET /api/v2/metadata HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:54204 - \"GET /api/v2/metadata HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:54204 - \"POST /api/v2/login HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:54204 - \"GET /api/v2/api?verify_key=8142e576c07f91243818f902b3ec8b2f49cd5a7eec2c126e8df11ce1bc36d7c2&communication_protocol=dev HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:54206 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "ERROR:root:Invalid SMTP credentials timed out\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO: 127.0.0.1:54209 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "ERROR:root:Invalid SMTP credentials timed out\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO: 127.0.0.1:54495 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:54618 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:54621 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:54658 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:54660 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:54667 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:54669 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:54791 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:54808 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:54810 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:54812 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:54815 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:54817 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:54819 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:54823 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:54825 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:54827 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:54829 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:54831 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:54833 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:54835 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:54837 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:54839 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:54856 - \"GET /api/v2/api?verify_key=e117b2b343c7bf0dc2c88a0b2e104e04118143220f0d7ff8ed2f178d1da0c167&communication_protocol=dev HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:54856 - \"GET 
/api/v2/metadata HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:54858 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "ERROR:syft.server.routes:Register Error: You don't have permission to create an account on the datasite: a. Please contact the Datasite Owner.. user={'id': , 'email': 'a@b.com', 'name': 'a', 'role': None, 'password': 'c', 'password_verify': 'c', 'verify_key': None, 'institution': None, 'website': None, 'created_by': {'signing_key': }, 'mock_execution_permission': False}\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO: 127.0.0.1:54862 - \"POST /api/v2/register HTTP/1.1\" 200 OK\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "ERROR:syft.server.routes:Register Error: You don't have permission to create an account on the datasite: a. Please contact the Datasite Owner.. user={'id': , 'email': 'a@b.com', 'name': 'a', 'role': None, 'password': 'c', 'password_verify': 'c', 'verify_key': None, 'institution': None, 'website': None, 'created_by': {'signing_key': }, 'mock_execution_permission': False}\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO: 127.0.0.1:54864 - \"POST /api/v2/register HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:54868 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:54870 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:54872 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55056 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55058 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55062 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55127 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55129 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55131 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55134 - \"GET /api/v2/api?verify_key=ca989d05c9ef083b8f56c281b097570223ca1567761515eaeac9e2794d103f0a&communication_protocol=dev HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55134 - \"GET /api/v2/metadata HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55136 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55149 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55152 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55155 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55157 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55159 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55177 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55180 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55182 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55184 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55186 - \"GET /api/v2/api?verify_key=863592d8f09e8410733073b3388c0a9219cda01aa4ab3c602ad35790adaf3a2a&communication_protocol=dev HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55188 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "ERROR:syft.server.routes:Register Error: You don't have permission to create an account on the datasite: a. Please contact the Datasite Owner.. 
user={'id': , 'email': 'a@b.com', 'name': 'a', 'role': None, 'password': 'c', 'password_verify': 'c', 'verify_key': None, 'institution': None, 'website': None, 'created_by': {'signing_key': }, 'mock_execution_permission': False}\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO: 127.0.0.1:55186 - \"POST /api/v2/register HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55193 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55196 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55198 - \"POST /api/v2/register HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55200 - \"POST /api/v2/login HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55200 - \"GET /api/v2/api?verify_key=3a22a2dbfdfff9241974522007c3aefb3526cb709742bbe80594b39d5026621c&communication_protocol=dev HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55202 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55204 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55479 - \"GET /api/v2/metadata HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55496 - \"GET /api/v2/metadata HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55526 - \"GET /api/v2/metadata HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55612 - \"GET /api/v2/metadata HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55612 - \"POST /api/v2/login HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55612 - \"GET /api/v2/api?verify_key=8142e576c07f91243818f902b3ec8b2f49cd5a7eec2c126e8df11ce1bc36d7c2&communication_protocol=dev HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55614 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55617 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55619 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55621 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55623 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55625 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55627 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55633 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55635 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55637 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55639 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55641 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55643 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55645 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:56162 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:56164 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:56166 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:56168 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:56170 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:56172 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:56174 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:56177 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:56179 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n" + ] + } + ], + "source": [ + "server.land()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fbfbb1b1-8a3b-4386-9b30-e373ca2cf8d5", + "metadata": {}, "outputs": [], "source": [] } diff --git a/notebooks/scenarios/getting-started/03-configuring-datasite.ipynb b/notebooks/scenarios/getting-started/03-configuring-datasite.ipynb new file mode 
100644 index 00000000000..c4925c81311 --- /dev/null +++ b/notebooks/scenarios/getting-started/03-configuring-datasite.ipynb @@ -0,0 +1,1559 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "9278cdde-2ddc-493f-ab1a-7a224f2c2338", + "metadata": {}, + "outputs": [], + "source": [ + "# -- all the configuration and server settings options\n", + "# -- optional: adding email settings (smtp with sendgrid free account)\n", + "# -- changing passwords (importance of root account)" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "f9cac3bb-9310-4c6e-bf02-08752e9d70df", + "metadata": {}, + "outputs": [], + "source": [ + "SYFT_VERSION = \">=0.8.7.b0,<0.9\"\n", + "package_string = f'\"syft{SYFT_VERSION}\"'" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "d01b99d1-999f-4e85-abf3-c74484df8bef", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "✅ The installed version of syft==0.8.7b14 matches the requirement >=0.8.7b0 and the requirement <0.9\n" + ] + } + ], + "source": [ + "# syft absolute\n", + "import syft as sy\n", + "\n", + "sy.requires(SYFT_VERSION)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "2045933d-bc99-4483-9ca1-89c6ab166a0f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Starting test-datasite-1 server on 0.0.0.0:8081\n", + " Done.\n" + ] + }, + { + "data": { + "text/html": [ + "
SyftInfo: You have launched a development server at http://0.0.0.0:8081.It is intended only for local use.
" + ], + "text/plain": [ + "SyftInfo: You have launched a development server at http://0.0.0.0:8081.It is intended only for local use." + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# run a local webserver\n", + "server = sy.orchestra.launch(name=\"test-datasite-1\", port=8081)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "15470a8b-8760-49b5-8ab9-2947a4518075", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Logged into as GUEST\n", + "Logged into as \n" + ] + }, + { + "data": { + "text/html": [ + "
SyftWarning: You are using a default password. Please change the password using `[your_client].me.set_password([new_password])`.
" + ], + "text/plain": [ + "SyftWarning: You are using a default password. Please change the password using `[your_client].me.set_password([new_password])`." + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "client = server.login(email=\"info@openmined.org\", password=\"changethis\")" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "84cdb3e3-a165-4f7b-9390-56ebb3cf02a4", + "metadata": {}, + "outputs": [], + "source": [ + "# email" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "1a0738a4-7acd-4e00-b659-651bf0af8a4e", + "metadata": {}, + "outputs": [], + "source": [ + "secrets = sy.get_nb_secrets(defaults={\"smtp_token\": \"ADD_POSTMARK_TOKEN\"})" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "488d9fd4-6de9-46e0-9565-3f479af534e2", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
SyftError: Invalid SMTP credentials. Please check your username and password.
" + ], + "text/plain": [ + "SyftError: Invalid SMTP credentials. Please check your username and password." + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "smtp_username, smtp_password, smtp_server, smtp_port = (\n", + " secrets[\"smtp_token\"],\n", + " secrets[\"smtp_token\"],\n", + " \"smtp.postmarkapp.com\",\n", + " \"25\",\n", + ")\n", + "# configure email settings\n", + "client.api.services.settings.enable_notifications(\n", + " email_username=smtp_username,\n", + " email_password=smtp_password,\n", + " email_sender=\"madhava@openmined.org\",\n", + " email_server=smtp_server,\n", + " email_port=smtp_port,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "e0cbb4dc-3400-418e-ae06-fd3033e042a1", + "metadata": {}, + "outputs": [], + "source": [ + "ns = client.api.services.notifications.settings()" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "1ec5f362-ecc1-44f6-99ac-ea8771ceb040", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "True" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "ns.email_enabled" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "0731f6fb-82bc-4ee3-bce1-9988e38b842a", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{: syft.service.notifier.notifier.EmailNotifier}" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "ns.notifiers" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "ace1e4a6-f1db-4506-8b5a-47d2940b510e", + "metadata": {}, + "outputs": [], + "source": [ + "s = client.api.services.settings.get()" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "c5556eee-1f9e-4eb0-b82a-ec4f523a3260", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + " \n", + " \n", + "
[HTML settings card] Settings | Id: 2baa8fe9deb94b6ead87ea7be4cb230e | Name: test-datasite-1 | Organization: OpenMined | Description: This is the default description for a Datasite Server. | Deployed on: 07/15/2024 | Signup enabled: False | Notifications enabled: True via email | Admin email: info@openmined.org
\n", + "\n", + " " + ], + "text/markdown": [ + "```python\n", + "class ServerSettings:\n", + " id: str = 2baa8fe9deb94b6ead87ea7be4cb230e\n", + " name: str = \"test-datasite-1\"\n", + " organization: str = \"OpenMined\"\n", + " description: str = \"This is the default description for a Datasite Server.\"\n", + " deployed_on: str = \"07/15/2024\"\n", + " signup_enabled: str = False\n", + " admin_email: str = \"info@openmined.org\"\n", + "\n", + "```" + ], + "text/plain": [ + "syft.service.settings.settings.ServerSettings" + ] + }, + "execution_count": 18, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "s" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "f81fd6f7-6e61-48d6-bf0c-e22718a109fb", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
SyftError: Arg is `SyftError: Duplication Key Error for syft.service.settings.settings.ServerSettings.\n",
+       "The fields that should be unique are `id`.`. \n",
+       "It must be of type `ServerSettings`, not `SyftError`
" + ], + "text/plain": [ + "SyftError: Arg is `SyftError: Duplication Key Error for syft.service.settings.settings.ServerSettings.\n", + "The fields that should be unique are `id`.`. \n", + "It must be of type `ServerSettings`, not `SyftError`" + ] + }, + "execution_count": 20, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "client.api.services.settings.set(s)" + ] + }, + { + "cell_type": "code", + "execution_count": 42, + "id": "0ad33f86-3576-4dd7-9cac-5ccf7fefb943", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
SyftSuccess: Settings updated successfully. You must call .refresh() to sync your client with the changes.
" + ], + "text/plain": [ + "SyftSuccess: Settings updated successfully. You must call .refresh() to sync your client with the changes." + ] + }, + "execution_count": 42, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "client.api.services.settings.update(\n", + " name=\"a\",\n", + " organization=\"b\",\n", + " description=\"c\",\n", + " admin_email=\"info2@openmined.org\",\n", + " association_request_auto_approval=True,\n", + " eager_execution_enabled=False,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 46, + "id": "82eac269-4f51-45a8-8480-8a2e3a382862", + "metadata": {}, + "outputs": [], + "source": [ + "md = \"\"\"\n", + " An h1 header\n", + "============\n", + "\n", + "Paragraphs are separated by a blank line.\n", + "\n", + "2nd paragraph. *Italic*, **bold**, and `monospace`. Itemized lists\n", + "look like:\n", + "\n", + " * this one\n", + " * that one\n", + " * the other one\n", + "\n", + "Note that --- not considering the asterisk --- the actual text\n", + "content starts at 4-columns in.\n", + "\n", + "> Block quotes are\n", + "> written like so.\n", + ">\n", + "> They can span multiple paragraphs,\n", + "> if you like.\n", + "\n", + "Use 3 dashes for an em-dash. Use 2 dashes for ranges (ex., \"it's all\n", + "in chapters 12--14\"). Three dots ... will be converted to an ellipsis.\n", + "Unicode is supported. ☺\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 49, + "id": "8389d1a4-027e-4282-ad91-274c34e0011c", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "\n", + " An h1 header\n", + "============\n", + "\n", + "Paragraphs are separated by a blank line.\n", + "\n", + "2nd paragraph. *Italic*, **bold**, and `monospace`. Itemized lists\n", + "look like:\n", + "\n", + " * this one\n", + " * that one\n", + " * the other one\n", + "\n", + "Note that --- not considering the asterisk --- the actual text\n", + "content starts at 4-columns in.\n", + "\n", + "> Block quotes are\n", + "> written like so.\n", + ">\n", + "> They can span multiple paragraphs,\n", + "> if you like.\n", + "\n", + "Use 3 dashes for an em-dash. Use 2 dashes for ranges (ex., \"it's all\n", + "in chapters 12--14\"). Three dots ... will be converted to an ellipsis.\n", + "Unicode is supported. ☺\n" + ], + "text/plain": [ + "syft.util.misc_objs.MarkdownDescription" + ] + }, + "execution_count": 49, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "client.api.services.settings.welcome_preview(markdown=md)" + ] + }, + { + "cell_type": "code", + "execution_count": 53, + "id": "53d16bdf-bcfa-4ca5-8b98-5124f480e81b", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
SyftSuccess: Welcome Markdown was successfully updated!
" + ], + "text/plain": [ + "SyftSuccess: Welcome Markdown was successfully updated!" + ] + }, + "execution_count": 53, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "client.api.services.settings.welcome_customize(markdown=md)" + ] + }, + { + "cell_type": "code", + "execution_count": 54, + "id": "d267ccf1-9cb0-4b02-80fe-2608d6c55549", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "\n", + " An h1 header\n", + "============\n", + "\n", + "Paragraphs are separated by a blank line.\n", + "\n", + "2nd paragraph. *Italic*, **bold**, and `monospace`. Itemized lists\n", + "look like:\n", + "\n", + " * this one\n", + " * that one\n", + " * the other one\n", + "\n", + "Note that --- not considering the asterisk --- the actual text\n", + "content starts at 4-columns in.\n", + "\n", + "> Block quotes are\n", + "> written like so.\n", + ">\n", + "> They can span multiple paragraphs,\n", + "> if you like.\n", + "\n", + "Use 3 dashes for an em-dash. Use 2 dashes for ranges (ex., \"it's all\n", + "in chapters 12--14\"). Three dots ... will be converted to an ellipsis.\n", + "Unicode is supported. ☺\n" + ], + "text/plain": [ + "syft.util.misc_objs.MarkdownDescription" + ] + }, + "execution_count": 54, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "client.api.services.settings.welcome_show()" + ] + }, + { + "cell_type": "code", + "execution_count": 55, + "id": "bf5318ef-0aa5-4bb5-b366-3f5fc06f00b6", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Logged into as GUEST\n" + ] + }, + { + "data": { + "text/html": [ + "
[HTML-rendered welcome message: the sample markdown set via welcome_customize above, i.e. "An h1 header", example paragraphs, an itemized list, block quotes, dash and ellipsis conventions, and "Unicode is supported. ☺"]
" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 55, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "guest_client = client.login_as_guest()\n", + "guest_client" + ] + }, + { + "cell_type": "code", + "execution_count": 56, + "id": "1abfe0af-9566-4f78-892e-0b7397ec081c", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
SyftError: You don't have permission to create an account on the datasite: a. Please contact the Datasite Owner.
" + ], + "text/plain": [ + "SyftError: You don't have permission to create an account on the datasite: a. Please contact the Datasite Owner." + ] + }, + "execution_count": 56, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "guest_client.register(name=\"a\", email=\"a@b.com\", password=\"c\", password_verify=\"c\")" + ] + }, + { + "cell_type": "code", + "execution_count": 58, + "id": "a19d34bd-0033-4c90-9ef7-c88fe4d87dad", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
SyftSuccess: Registration feature successfully enabled
" + ], + "text/plain": [ + "SyftSuccess: Registration feature successfully enabled" + ] + }, + "execution_count": 58, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "client.api.services.settings.allow_guest_signup(enable=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 59, + "id": "bd7e84e7-8bd5-42cc-b5d0-86efbd6a56eb", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
SyftSuccess: User 'a' successfully registered!
" + ], + "text/plain": [ + "SyftSuccess: User 'a' successfully registered!" + ] + }, + "execution_count": 59, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "guest_client.register(name=\"a\", email=\"a@b.com\", password=\"c\", password_verify=\"c\")" + ] + }, + { + "cell_type": "code", + "execution_count": 60, + "id": "1270cfa4-c771-4e79-8744-6a8276db153d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Logged into as \n" + ] + }, + { + "data": { + "text/html": [ + "
[HTML-rendered welcome message shown again to the newly registered user after login: the same customized markdown ("An h1 header", example paragraphs, itemized list, block quotes, dash and ellipsis conventions, "Unicode is supported. ☺")]
" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 60, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "guest_client.login(email=\"a@b.com\", password=\"c\")" + ] + }, + { + "cell_type": "code", + "execution_count": 61, + "id": "d8453c72-6095-44c5-82aa-6f11bb71599e", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Stopping test-datasite-1\n" + ] + } + ], + "source": [ + "server.land()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b3571ba3-709a-4d5b-8e8e-346bf840a19e", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.5" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/scenarios/getting-started/03-configuring-domain.ipynb b/notebooks/scenarios/getting-started/03-configuring-domain.ipynb deleted file mode 100644 index b152a87d872..00000000000 --- a/notebooks/scenarios/getting-started/03-configuring-domain.ipynb +++ /dev/null @@ -1,37 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "9278cdde-2ddc-493f-ab1a-7a224f2c2338", - "metadata": {}, - "outputs": [], - "source": [ - "# -- all the configuration and node settings options\n", - "# -- optional: adding email settings (smtp with sendgrid free account)\n", - "# -- changing passwords (importance of root account)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/scenarios/getting-started/04-uploading-data.ipynb b/notebooks/scenarios/getting-started/04-uploading-data.ipynb index 1ba607547bb..d6e0dc4ff22 100644 --- a/notebooks/scenarios/getting-started/04-uploading-data.ipynb +++ b/notebooks/scenarios/getting-started/04-uploading-data.ipynb @@ -17,9 +17,8300 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, + "id": "845fd5a5-03aa-44fb-b6aa-532936ffc941", + "metadata": {}, + "outputs": [], + "source": [ + "SYFT_VERSION = \">=0.8.7.b0,<0.9\"\n", + "package_string = f'\"syft{SYFT_VERSION}\"'" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "43db561b-b80c-447a-ae85-cc37496a9965", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "✅ The installed version of syft==0.8.7b14 matches the requirement >=0.8.7b0 and the requirement <0.9\n" + ] + } + ], + "source": [ + "# syft absolute\n", + "import syft as sy\n", + "\n", + "sy.requires(SYFT_VERSION)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, "id": "150862d6-0395-4fbb-ad19-9bdae91e33fa", "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Starting test-datasite-1 server on 0.0.0.0:8081\n", + " Done.\n" + ] + }, + { + "data": { + "text/html": [ + "
SyftInfo:
You have launched a development server at http://0.0.0.0:8081. It is intended only for local use.

" + ], + "text/plain": [ + "SyftInfo: You have launched a development server at http://0.0.0.0:8081.It is intended only for local use." + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# run a local webserver\n", + "server = sy.orchestra.launch(name=\"test-datasite-1\", port=8081)" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "86e27ecb-f661-4fe4-a939-da060d001be2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Logged into as GUEST\n", + "Logged into as \n" + ] + }, + { + "data": { + "text/html": [ + "
SyftWarning:
You are using a default password. Please change the password using `[your_client].me.set_password([new_password])`.

" + ], + "text/plain": [ + "SyftWarning: You are using a default password. Please change the password using `[your_client].me.set_password([new_password])`." + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "client = server.login(email=\"info@openmined.org\", password=\"changethis\")" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "e2eeed1f-41d7-4e17-84db-e7897242bcec", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " % Total % Received % Xferd Average Speed Time Time Time Current\n", + " Dload Upload Total Spent Left Speed\n", + "100 157M 100 157M 0 0 2122k 0 0:01:16 0:01:16 --:--:-- 2137k48k 0 0 1694k 0 0:01:35 0:00:05 0:01:30 1921k 0 0:01:40 0:00:10 0:01:30 1516k 0:01:15 2333k 0 0 2257k 0 0:01:11 0:01:01 0:00:10 917k 0 2150k 0 0:01:15 0:01:08 0:00:07 1455k\n" + ] + } + ], + "source": [ + "# stdlib\n", + "import os\n", + "\n", + "if not os.path.exists(\"ages_dataset.csv\"):\n", + " !curl -O https://openminedblob.blob.core.windows.net/csvs/ages_dataset.csv" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "c25ffa3b-6415-4b63-b1ce-7ed3365eeeeb", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
IdNameShort descriptionGenderCountryOccupationBirth yearDeath yearManner of deathAge of deathAssociated CountriesAssociated Country Coordinates (Lat/Lon)Associated Country Life Expectancy
0Q23George Washington1st president of the United States (1732–1799)MaleUnited States of America; Kingdom of Great Bri...Politician17321799.0natural causes67.0['United Kingdom', 'United States'][(55.378051, -3.435973), (37.09024, -95.712891)][81.3, 78.5]
1Q42Douglas AdamsEnglish writer and humoristMaleUnited KingdomArtist19522001.0natural causes49.0['United Kingdom'][(55.378051, -3.435973)][81.3]
2Q91Abraham Lincoln16th president of the United States (1809-1865)MaleUnited States of AmericaPolitician18091865.0homicide56.0['United States'][(37.09024, -95.712891)][78.5]
5Q260Jean-François ChampollionFrench classical scholarMaleKingdom of France; First French EmpireEgyptologist17901832.0natural causes42.0['France'][(46.227638, 2.213749)][82.5]
7Q296Claude MonetFrench impressionist painter (1840-1926)MaleFranceArtist18401926.0natural causes86.0['France'][(46.227638, 2.213749)][82.5]
\n", + "
" + ], + "text/plain": [ + " Id Name \\\n", + "0 Q23 George Washington \n", + "1 Q42 Douglas Adams \n", + "2 Q91 Abraham Lincoln \n", + "5 Q260 Jean-François Champollion \n", + "7 Q296 Claude Monet \n", + "\n", + " Short description Gender \\\n", + "0 1st president of the United States (1732–1799) Male \n", + "1 English writer and humorist Male \n", + "2 16th president of the United States (1809-1865) Male \n", + "5 French classical scholar Male \n", + "7 French impressionist painter (1840-1926) Male \n", + "\n", + " Country Occupation \\\n", + "0 United States of America; Kingdom of Great Bri... Politician \n", + "1 United Kingdom Artist \n", + "2 United States of America Politician \n", + "5 Kingdom of France; First French Empire Egyptologist \n", + "7 France Artist \n", + "\n", + " Birth year Death year Manner of death Age of death \\\n", + "0 1732 1799.0 natural causes 67.0 \n", + "1 1952 2001.0 natural causes 49.0 \n", + "2 1809 1865.0 homicide 56.0 \n", + "5 1790 1832.0 natural causes 42.0 \n", + "7 1840 1926.0 natural causes 86.0 \n", + "\n", + " Associated Countries \\\n", + "0 ['United Kingdom', 'United States'] \n", + "1 ['United Kingdom'] \n", + "2 ['United States'] \n", + "5 ['France'] \n", + "7 ['France'] \n", + "\n", + " Associated Country Coordinates (Lat/Lon) \\\n", + "0 [(55.378051, -3.435973), (37.09024, -95.712891)] \n", + "1 [(55.378051, -3.435973)] \n", + "2 [(37.09024, -95.712891)] \n", + "5 [(46.227638, 2.213749)] \n", + "7 [(46.227638, 2.213749)] \n", + "\n", + " Associated Country Life Expectancy \n", + "0 [81.3, 78.5] \n", + "1 [81.3] \n", + "2 [78.5] \n", + "5 [82.5] \n", + "7 [82.5] " + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# third party\n", + "import pandas as pd\n", + "\n", + "# syft absolute\n", + "import syft as sy\n", + "\n", + "age_df = pd.read_csv(\"ages_dataset.csv\")\n", + "age_df = age_df.dropna(how=\"any\")\n", + "age_df.head()" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "b0fdc5b3-2cc5-4084-8904-525079b7fe00", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " % Total % Received % Xferd Average Speed Time Time Time Current\n", + " Dload Upload Total Spent Left Speed\n", + "100 8217k 100 8217k 0 0 1501k 0 0:00:05 0:00:05 --:--:-- 1671k\n" + ] + } + ], + "source": [ + "# stdlib\n", + "# TODO: also move to dataset repo\n", + "import os\n", + "\n", + "if not os.path.exists(\"ages_mock_dataset.csv\"):\n", + " !curl -O https://openminedblob.blob.core.windows.net/csvs/ages_mock_dataset.csv" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "0e130662-73f4-43a4-afb6-c6ecc107c5a7", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
IdGenderAge of deathAssociated CountriesAssociated Country Life ExpectancyManner of deathNameShort descriptionOccupationDeath yearBirth yearCountryAssociated Country Coordinates (Lat/Lon)
0Q19723Gender 153.0['United States'][78.5]homicideNorma FisherMagazine truth stop whose group through despite.Corporate treasurer1989.01936Not AvailableNot Available
1Q20057Gender 151.0['United Kingdom'][81.3]natural causesBrandon LloydTotal financial role together range line beyon...Chief Financial Officer2018.01967Not AvailableNot Available
2Q8791Gender 184.0['Sweden'][82.5]natural causesMichelle GloverPartner stock four. Region as true develop sou...Speech and language therapist2000.01916Not AvailableNot Available
3Q30567Gender 164.0['Belgium'][81.6]natural causesWillie GoldenFeeling fact by four. Data son natural explain...Financial controller1989.01925Not AvailableNot Available
4Q14013Gender 188.0['United Kingdom'][81.3]suicideRoberto JohnsonAttorney quickly candidate change although bag...Sound technician, broadcasting/film/video2016.01928Not AvailableNot Available
\n", + "
" + ], + "text/plain": [ + " Id Gender Age of death Associated Countries \\\n", + "0 Q19723 Gender 1 53.0 ['United States'] \n", + "1 Q20057 Gender 1 51.0 ['United Kingdom'] \n", + "2 Q8791 Gender 1 84.0 ['Sweden'] \n", + "3 Q30567 Gender 1 64.0 ['Belgium'] \n", + "4 Q14013 Gender 1 88.0 ['United Kingdom'] \n", + "\n", + " Associated Country Life Expectancy Manner of death Name \\\n", + "0 [78.5] homicide Norma Fisher \n", + "1 [81.3] natural causes Brandon Lloyd \n", + "2 [82.5] natural causes Michelle Glover \n", + "3 [81.6] natural causes Willie Golden \n", + "4 [81.3] suicide Roberto Johnson \n", + "\n", + " Short description \\\n", + "0 Magazine truth stop whose group through despite. \n", + "1 Total financial role together range line beyon... \n", + "2 Partner stock four. Region as true develop sou... \n", + "3 Feeling fact by four. Data son natural explain... \n", + "4 Attorney quickly candidate change although bag... \n", + "\n", + " Occupation Death year Birth year \\\n", + "0 Corporate treasurer 1989.0 1936 \n", + "1 Chief Financial Officer 2018.0 1967 \n", + "2 Speech and language therapist 2000.0 1916 \n", + "3 Financial controller 1989.0 1925 \n", + "4 Sound technician, broadcasting/film/video 2016.0 1928 \n", + "\n", + " Country Associated Country Coordinates (Lat/Lon) \n", + "0 Not Available Not Available \n", + "1 Not Available Not Available \n", + "2 Not Available Not Available \n", + "3 Not Available Not Available \n", + "4 Not Available Not Available " + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "age_mock_df = pd.read_csv(\"ages_mock_dataset.csv\")\n", + "age_mock_df = age_mock_df.dropna(how=\"any\")\n", + "age_mock_df.head()" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "0bbc6868-d66c-4a96-ad39-fb340e7037b3", + "metadata": {}, + "outputs": [], + "source": [ + "# How an asset for low side and high-side would be defined:\n", + "main_contributor = sy.Contributor(\n", + " name=\"Jeffrey Salazar\", role=\"Dataset Creator\", email=\"jsala@ailab.com\"\n", + ")\n", + "\n", + "asset = sy.Asset(\n", + " name=\"asset_name\",\n", + " data=age_df, # real dataframe\n", + " mock=age_mock_df, # mock dataframe\n", + " contributors=[main_contributor],\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "43474e47-20dd-437a-9b16-324831f692f4", + "metadata": {}, + "outputs": [], + "source": [ + "description_template = \"\"\"### About the dataset\n", + "This extensive dataset provides a rich collection of demographic and life events records for individuals across multiple countries. It covers a wide range of indicators and attributes related to personal information, birth and death events, gender, occupation, and associated countries. The dataset offers valuable insights into population dynamics and various aspects of human life, enabling comprehensive analyses and cross-country comparisons. The dataset is the largest one on notable deceased people and includes individ- uals from a variety of social groups, including but not limited to 107k females, 90k researchers, and 124 non-binary indi- viduals, spread across more than 300 contemporary or histor- ical regions.\n", + "\n", + "### Dataset usage policy\n", + "This dataset is subject to compliance with internal data use and mis-use policies at our organisation. 
The following rules apply:\n", + "- only aggregate statistics can be released from data computation\n", + "- data subjects should never be identifiable through the data computation outcomes\n", + "- a fixed privacy budget of eps=5 must be preserved by each researcher\n", + "\n", + "### Data collection and pre-processing\n", + "The dataset is based on open data hosted by Wikimedia Foundation.\n", + "\n", + "**Age**\n", + "Whenever possible, age was calculated based on the birth and death year mentioned in the description of the individual.\n", + "\n", + "**Gender**\n", + "Gender was available in the original dataset for 50% of participants. For the remaining, it was added from predictions based on name, country and century in which they lived. (97.51% accuracy and 98.89% F1-score)\n", + "\n", + "**Occupation**\n", + "The occupation was available in the original dataset for 66% of the individuals. For the remaining, it was added from predictions from a multiclass text classificator model. (93.4% accuracy for 84% of the dataset)\n", + "\n", + "More details about the features can be found by reading the paper.\n", + "\n", + "### Key features\n", + "1. **Id**: Unique identifier for each individual.\n", + "2. **Name**: Name of the person.\n", + "3. **Short description**: Brief description or summary of the individual.\n", + "4. **Gender**: Gender/s of the individual.\n", + "5. **Country**: Countries/Kingdoms of residence and/or origin.\n", + "6. **Occupation**: Occupation or profession of the individual.\n", + "7. **Birth year**: Year of birth for the individual.\n", + "8. **Death year**: Year of death for the individual.\n", + "9. **Manner of death**: Details about the circumstances or manner of death.\n", + "10. **Age of death**: Age at the time of death for the individual.\n", + "11. **Associated Countries**: Modern Day Countries associated with the individual.\n", + "12. **Associated Country Coordinates (Lat/Lon)**: Modern Day Latitude and longitude coordinates of the associated countries.\n", + "13. 
**Associated Country Life Expectancy**: Life expectancy of the associated countries.\n", + "\n", + "### Use cases\n", + "- Analyze demographic trends and birth rates in different countries.\n", + "- Investigate factors affecting life expectancy and mortality rates.\n", + "- Study the relationship between gender and occupation across regions.\n", + "- Explore correlations between age of death and associated country attributes.\n", + "- Examine patterns of migration and associated countries' life expectancy.\n", + "\n", + "\n", + "### Getting started\n", + "\n", + "```\n", + "!curl -O https://openminedblob.blob.core.windows.net/csvs/ages_dataset.csv\n", + "\n", + "age_df = pd.read_csv(\"ages_dataset.csv\")\n", + "```\n", + "\n", + "### Execution environment\n", + "The data is hosted in a remote compute environment with the following specifications:\n", + "- X CPU cores\n", + "- 1 GPU of type Y\n", + "- Z RAM\n", + "- A additional available storage\n", + "\n", + "### Citation\n", + "Annamoradnejad, Issa; Annamoradnejad, Rahimberdi (2022), “Age dataset: A structured general-purpose dataset on life, work, and death of 1.22 million distinguished people”, In Workshop Proceedings of the 16th International AAAI Conference on Web and Social Media (ICWSM), doi: 10.36190/2022.82\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "ebfed140-db5b-4bd1-886a-dabeb52dbe9a", + "metadata": {}, + "outputs": [], + "source": [ + "dataset = sy.Dataset(\n", + " name=\"Dataset name\",\n", + " description=description_template,\n", + " asset_list=[asset],\n", + " contributors=[main_contributor],\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6901ddcc-b72a-4e79-9071-a47531ff4eba", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "18b4056a-260e-4d61-9333-c2a2d3e50f21", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Uploading: asset_name: 100%|\u001b[32m████████████████\u001b[0m| 1/1 [00:00<00:00, 1.35it/s]\u001b[0m\n" + ] + }, + { + "data": { + "text/html": [ + "
SyftSuccess:
Dataset uploaded to 'a'. To see the datasets uploaded by a client on this server, use command `[your_client].datasets`

" + ], + "text/plain": [ + "SyftSuccess: Dataset uploaded to 'a'. To see the datasets uploaded by a client on this server, use command `[your_client].datasets`" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Uploading the dataset\n", + "client.upload_dataset(dataset)" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "0ade7507-77f7-450b-9b59-a34bad501609", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "
\n", + "
\n", + " \n", + "
\n", + "

Dataset Dicttuple

\n", + "
\n", + "
\n", + "
\n", + " \n", + "
\n", + "
\n", + "

Total: 0

\n", + "
\n", + "
\n", + "
\n", + "\n", + "\n", + "\n", + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 18, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "client.datasets" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "f19c0bc1-1466-4002-a23e-1f3afdd84f25", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + " \n", + " \n", + "
\n", + "

Dataset name

\n", + "

Summary

\n", + " \n", + " \n", + "

Description

\n", + "

About the dataset

\n", + "

This extensive dataset provides a rich collection of demographic and life events records for individuals across multiple countries. It covers a wide range of indicators and attributes related to personal information, birth and death events, gender, occupation, and associated countries. The dataset offers valuable insights into population dynamics and various aspects of human life, enabling comprehensive analyses and cross-country comparisons. The dataset is the largest one on notable deceased people and includes individuals from a variety of social groups, including but not limited to 107k females, 90k researchers, and 124 non-binary individuals, spread across more than 300 contemporary or historical regions.

\n", + "

Dataset usage policy

\n", + "

This dataset is subject to compliance with internal data use and mis-use policies at our organisation. The following rules apply:\n", + "- only aggregate statistics can be released from data computation\n", + "- data subjects should never be identifiable through the data computation outcomes\n", + "- a fixed privacy budget of eps=5 must be preserved by each researcher

\n", + "

Data collection and pre-processing

\n", + "

The dataset is based on open data hosted by Wikimedia Foundation.

\n", + "

Age\n", + "Whenever possible, age was calculated based on the birth and death year mentioned in the description of the individual.

\n", + "

Gender\n", + "Gender was available in the original dataset for 50% of participants. For the remaining, it was added from predictions based on name, country and century in which they lived. (97.51% accuracy and 98.89% F1-score)

\n", + "

Occupation\n", + "The occupation was available in the original dataset for 66% of the individuals. For the remaining, it was added from predictions from a multiclass text classificator model. (93.4% accuracy for 84% of the dataset)

\n", + "

More details about the features can be found by reading the paper.

\n", + "

Key features

\n", + "
    \n", + "
  1. Id: Unique identifier for each individual.\n", + "
  2. Name: Name of the person.\n", + "
  3. Short description: Brief description or summary of the individual.\n", + "
  4. Gender: Gender/s of the individual.\n", + "
  5. Country: Countries/Kingdoms of residence and/or origin.\n", + "
  6. Occupation: Occupation or profession of the individual.\n", + "
  7. Birth year: Year of birth for the individual.\n", + "
  8. Death year: Year of death for the individual.\n", + "
  9. Manner of death: Details about the circumstances or manner of death.\n", + "
  10. Age of death: Age at the time of death for the individual.\n", + "
  11. Associated Countries: Modern Day Countries associated with the individual.\n", + "
  12. Associated Country Coordinates (Lat/Lon): Modern Day Latitude and longitude coordinates of the associated countries.\n", + "
  13. Associated Country Life Expectancy: Life expectancy of the associated countries.\n", + "
\n", + "

Use cases

\n", + "
    \n", + "
  • Analyze demographic trends and birth rates in different countries.\n", + "
  • Investigate factors affecting life expectancy and mortality rates.\n", + "
  • Study the relationship between gender and occupation across regions.\n", + "
  • Explore correlations between age of death and associated country attributes.\n", + "
  • Examine patterns of migration and associated countries' life expectancy.\n", + "
\n", + "

Getting started

\n", + "
!curl -O https://openminedblob.blob.core.windows.net/csvs/ages_dataset.csv\n",
+       "\n",
+       "age_df = pd.read_csv(\"ages_dataset.csv\")\n",
+       "
\n", + "

Execution environment

\n", + "

The data is hosted in a remote compute environment with the following specifications:\n", + "- X CPU cores\n", + "- 1 GPU of type Y\n", + "- Z RAM\n", + "- A additional available storage

\n", + "

Citation

\n", + "

Annamoradnejad, Issa; Annamoradnejad, Rahimberdi (2022), “Age dataset: A structured general-purpose dataset on life, work, and death of 1.22 million distinguished people”, In Workshop Proceedings of the 16th International AAAI Conference on Web and Social Media (ICWSM), doi: 10.36190/2022.82

\n", + " \n", + "

Dataset Details

\n", + "

Uploaded by: Jane Doe (info@openmined.org)

\n", + "

Created on: 2024-07-14 20:58:44

\n", + "

URL:\n", + " None

\n", + "

Contributors:\n", + " To see full details call dataset.contributors.

\n", + "

Assets

\n", + " \n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "
\n", + "
\n", + " \n", + "
\n", + "

Asset Dicttuple

\n", + "
\n", + "
\n", + "
\n", + " \n", + "
\n", + "
\n", + "

Total: 0

\n", + "
\n", + "
\n", + "
\n", + "\n", + "\n", + "\n", + "" + ], + "text/markdown": [ + "Syft Dataset: Dataset name\n", + "\n", + "Assets:\n", + "\n", + "\tasset_name\n", + "\n", + "Description: \n", + "\n", + "### About the dataset\n", + "This extensive dataset provides a rich collection of demographic and life events records for individuals across multiple countries. It covers a wide range of indicators and attributes related to personal information, birth and death events, gender, occupation, and associated countries. The dataset offers valuable insights into population dynamics and various aspects of human life, enabling comprehensive analyses and cross-country comparisons. The dataset is the largest one on notable deceased people and includes individ- uals from a variety of social groups, including but not limited to 107k females, 90k researchers, and 124 non-binary indi- viduals, spread across more than 300 contemporary or histor- ical regions.\n", + "\n", + "### Dataset usage policy\n", + "This dataset is subject to compliance with internal data use and mis-use policies at our organisation. The following rules apply:\n", + "- only aggregate statistics can be released from data computation\n", + "- data subjects should never be identifiable through the data computation outcomes\n", + "- a fixed privacy budget of eps=5 must be preserved by each researcher\n", + "\n", + "### Data collection and pre-processing\n", + "The dataset is based on open data hosted by Wikimedia Foundation.\n", + "\n", + "**Age**\n", + "Whenever possible, age was calculated based on the birth and death year mentioned in the description of the individual.\n", + "\n", + "**Gender**\n", + "Gender was available in the original dataset for 50% of participants. For the remaining, it was added from predictions based on name, country and century in which they lived. (97.51% accuracy and 98.89% F1-score)\n", + "\n", + "**Occupation**\n", + "The occupation was available in the original dataset for 66% of the individuals. For the remaining, it was added from predictions from a multiclass text classificator model. (93.4% accuracy for 84% of the dataset)\n", + "\n", + "More details about the features can be found by reading the paper.\n", + "\n", + "### Key features\n", + "1. **Id**: Unique identifier for each individual.\n", + "2. **Name**: Name of the person.\n", + "3. **Short description**: Brief description or summary of the individual.\n", + "4. **Gender**: Gender/s of the individual.\n", + "5. **Country**: Countries/Kingdoms of residence and/or origin.\n", + "6. **Occupation**: Occupation or profession of the individual.\n", + "7. **Birth year**: Year of birth for the individual.\n", + "8. **Death year**: Year of death for the individual.\n", + "9. **Manner of death**: Details about the circumstances or manner of death.\n", + "10. **Age of death**: Age at the time of death for the individual.\n", + "11. **Associated Countries**: Modern Day Countries associated with the individual.\n", + "12. **Associated Country Coordinates (Lat/Lon)**: Modern Day Latitude and longitude coordinates of the associated countries.\n", + "13. 
**Associated Country Life Expectancy**: Life expectancy of the associated countries.\n", + "\n", + "### Use cases\n", + "- Analyze demographic trends and birth rates in different countries.\n", + "- Investigate factors affecting life expectancy and mortality rates.\n", + "- Study the relationship between gender and occupation across regions.\n", + "- Explore correlations between age of death and associated country attributes.\n", + "- Examine patterns of migration and associated countries' life expectancy.\n", + "\n", + "\n", + "### Getting started\n", + "\n", + "```\n", + "!curl -O https://openminedblob.blob.core.windows.net/csvs/ages_dataset.csv\n", + "\n", + "age_df = pd.read_csv(\"ages_dataset.csv\")\n", + "```\n", + "\n", + "### Execution environment\n", + "The data is hosted in a remote compute environment with the following specifications:\n", + "- X CPU cores\n", + "- 1 GPU of type Y\n", + "- Z RAM\n", + "- A additional available storage\n", + "\n", + "### Citation\n", + "Annamoradnejad, Issa; Annamoradnejad, Rahimberdi (2022), “Age dataset: A structured general-purpose dataset on life, work, and death of 1.22 million distinguished people”, In Workshop Proceedings of the 16th International AAAI Conference on Web and Social Media (ICWSM), doi: 10.36190/2022.82\n", + "\n", + "\n" + ], + "text/plain": [ + "syft.service.dataset.dataset.Dataset" + ] + }, + "execution_count": 20, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "d = client.datasets[0]\n", + "d" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "id": "43e920d4-b400-4551-b4c2-a78b92f7f892", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + " \n", + " \n", + "\n", + "
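The usage policy in the description above only allows aggregate statistics to be released. Purely as an illustration of the kind of query that policy has in mind, a researcher prototyping against the mock might compute grouped aggregates rather than anything row-level; this is plain pandas on the mock dataframe from earlier, not a Syft API call:

```python
# Aggregate-only example: average age of death per manner of death, computed on the mock.
# The result contains no individual-level rows.
age_mock_df.groupby("Manner of death")["Age of death"].mean()
```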
\n", + "

asset_name

\n", + "

None

\n", + "

Asset ID: c8b177dd01d74fc4bde31cdcf3bafcc6

\n", + "

Action Object ID: 61d18e2bd00c4f4f8a284a4ddb2acbe0

\n", + "

Uploaded by: Jane Doe (info@openmined.org)

\n", + "

Created on: 2024-07-14 20:58:44

\n", + "

Data:

\n", + "
\n", + "\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "\n", + "
IdNameShort descriptionGenderCountryOccupationBirth yearDeath yearManner of deathAge of deathAssociated CountriesAssociated Country Coordinates (Lat/Lon)Associated Country Life Expectancy
Loading... (need help?)
\n", + "\n", + "\n", + "
\n", + "\n", + "

Mock Data:

\n", + "
\n", + "\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "\n", + "
IdGenderAge of deathAssociated CountriesAssociated Country Life ExpectancyManner of deathNameShort descriptionOccupationDeath yearBirth yearCountryAssociated Country Coordinates (Lat/Lon)
Loading... (need help?)
\n", + "\n", + "\n", + "
\n", + "\n", + "
" + ], + "text/markdown": [ + "```python\n", + "Asset: asset_name\n", + "Pointer Id: 61d18e2bd00c4f4f8a284a4ddb2acbe0\n", + "Description: None\n", + "Total Data Subjects: 0\n", + "Shape: (44211, 13)\n", + "Contributors: 2\n", + "\tJane Doe: info@openmined.org\n", + "\tJeffrey Salazar: jsala@ailab.com\n", + "\n", + "```" + ], + "text/plain": [ + "Asset(name='asset_name', server_uid='2baa8fe9deb94b6ead87ea7be4cb230e', action_id='61d18e2bd00c4f4f8a284a4ddb2acbe0')" + ] + }, + "execution_count": 22, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "a = d.assets[0]\n", + "a" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "86eb6f5c-ac2e-4dee-bac7-dcaa0dd972cf", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
IdGenderAge of deathAssociated CountriesAssociated Country Life ExpectancyManner of deathNameShort descriptionOccupationDeath yearBirth yearCountryAssociated Country Coordinates (Lat/Lon)
0Q19723Gender 153.0['United States'][78.5]homicideNorma FisherMagazine truth stop whose group through despite.Corporate treasurer1989.01936Not AvailableNot Available
1Q20057Gender 151.0['United Kingdom'][81.3]natural causesBrandon LloydTotal financial role together range line beyon...Chief Financial Officer2018.01967Not AvailableNot Available
2Q8791Gender 184.0['Sweden'][82.5]natural causesMichelle GloverPartner stock four. Region as true develop sou...Speech and language therapist2000.01916Not AvailableNot Available
3Q30567Gender 164.0['Belgium'][81.6]natural causesWillie GoldenFeeling fact by four. Data son natural explain...Financial controller1989.01925Not AvailableNot Available
4Q14013Gender 188.0['United Kingdom'][81.3]suicideRoberto JohnsonAttorney quickly candidate change although bag...Sound technician, broadcasting/film/video2016.01928Not AvailableNot Available
..........................................
44206Q21223Gender 187.0['United States'][78.5]natural causesSteven HillOccur site mean. None imagine social collectio...Television/film/video producer2014.01927Not AvailableNot Available
44207Q18681Gender 175.0['Austria'][81.6]natural causesLaura SmithFive help event as sort. Class training possib...Race relations officer2018.01943Not AvailableNot Available
44208Q34424Gender 156.0['France'][82.5]natural causesDiana JacobsMiddle style capital describe increase. Fly si...Civil Service fast streamer2009.01953Not AvailableNot Available
44209Q33102Gender 175.0['France'][82.5]natural causesLarry FosterWatch size character piece speak moment outsid...Speech and language therapist1982.01907Not AvailableNot Available
44210Q34422Gender 163.0['Poland'][77.6]unnatural deathThomas GonzalesFact thousand week professional.Biochemist, clinical2005.01942Not AvailableNot Available
\n", + "

44211 rows × 13 columns

\n", + "
" + ], + "text/plain": [ + " Id Gender Age of death Associated Countries \\\n", + "0 Q19723 Gender 1 53.0 ['United States'] \n", + "1 Q20057 Gender 1 51.0 ['United Kingdom'] \n", + "2 Q8791 Gender 1 84.0 ['Sweden'] \n", + "3 Q30567 Gender 1 64.0 ['Belgium'] \n", + "4 Q14013 Gender 1 88.0 ['United Kingdom'] \n", + "... ... ... ... ... \n", + "44206 Q21223 Gender 1 87.0 ['United States'] \n", + "44207 Q18681 Gender 1 75.0 ['Austria'] \n", + "44208 Q34424 Gender 1 56.0 ['France'] \n", + "44209 Q33102 Gender 1 75.0 ['France'] \n", + "44210 Q34422 Gender 1 63.0 ['Poland'] \n", + "\n", + " Associated Country Life Expectancy Manner of death Name \\\n", + "0 [78.5] homicide Norma Fisher \n", + "1 [81.3] natural causes Brandon Lloyd \n", + "2 [82.5] natural causes Michelle Glover \n", + "3 [81.6] natural causes Willie Golden \n", + "4 [81.3] suicide Roberto Johnson \n", + "... ... ... ... \n", + "44206 [78.5] natural causes Steven Hill \n", + "44207 [81.6] natural causes Laura Smith \n", + "44208 [82.5] natural causes Diana Jacobs \n", + "44209 [82.5] natural causes Larry Foster \n", + "44210 [77.6] unnatural death Thomas Gonzales \n", + "\n", + " Short description \\\n", + "0 Magazine truth stop whose group through despite. \n", + "1 Total financial role together range line beyon... \n", + "2 Partner stock four. Region as true develop sou... \n", + "3 Feeling fact by four. Data son natural explain... \n", + "4 Attorney quickly candidate change although bag... \n", + "... ... \n", + "44206 Occur site mean. None imagine social collectio... \n", + "44207 Five help event as sort. Class training possib... \n", + "44208 Middle style capital describe increase. Fly si... \n", + "44209 Watch size character piece speak moment outsid... \n", + "44210 Fact thousand week professional. \n", + "\n", + " Occupation Death year Birth year \\\n", + "0 Corporate treasurer 1989.0 1936 \n", + "1 Chief Financial Officer 2018.0 1967 \n", + "2 Speech and language therapist 2000.0 1916 \n", + "3 Financial controller 1989.0 1925 \n", + "4 Sound technician, broadcasting/film/video 2016.0 1928 \n", + "... ... ... ... \n", + "44206 Television/film/video producer 2014.0 1927 \n", + "44207 Race relations officer 2018.0 1943 \n", + "44208 Civil Service fast streamer 2009.0 1953 \n", + "44209 Speech and language therapist 1982.0 1907 \n", + "44210 Biochemist, clinical 2005.0 1942 \n", + "\n", + " Country Associated Country Coordinates (Lat/Lon) \n", + "0 Not Available Not Available \n", + "1 Not Available Not Available \n", + "2 Not Available Not Available \n", + "3 Not Available Not Available \n", + "4 Not Available Not Available \n", + "... ... ... \n", + "44206 Not Available Not Available \n", + "44207 Not Available Not Available \n", + "44208 Not Available Not Available \n", + "44209 Not Available Not Available \n", + "44210 Not Available Not Available \n", + "\n", + "[44211 rows x 13 columns]" + ] + }, + "execution_count": 23, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "a.mock" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "id": "2fdd319f-8c51-4203-82f1-fbf29ad5d614", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
IdNameShort descriptionGenderCountryOccupationBirth yearDeath yearManner of deathAge of deathAssociated CountriesAssociated Country Coordinates (Lat/Lon)Associated Country Life Expectancy
0Q23George Washington1st president of the United States (1732–1799)MaleUnited States of America; Kingdom of Great Bri...Politician17321799.0natural causes67.0['United Kingdom', 'United States'][(55.378051, -3.435973), (37.09024, -95.712891)][81.3, 78.5]
1Q42Douglas AdamsEnglish writer and humoristMaleUnited KingdomArtist19522001.0natural causes49.0['United Kingdom'][(55.378051, -3.435973)][81.3]
2Q91Abraham Lincoln16th president of the United States (1809-1865)MaleUnited States of AmericaPolitician18091865.0homicide56.0['United States'][(37.09024, -95.712891)][78.5]
5Q260Jean-François ChampollionFrench classical scholarMaleKingdom of France; First French EmpireEgyptologist17901832.0natural causes42.0['France'][(46.227638, 2.213749)][82.5]
7Q296Claude MonetFrench impressionist painter (1840-1926)MaleFranceArtist18401926.0natural causes86.0['France'][(46.227638, 2.213749)][82.5]
..........................................
1215502Q75349931John SaxtonBritish diarist and settler in New ZealandMaleUnited Kingdom of Great Britain and IrelandSettler18071866.0suicide59.0['United Kingdom'][(55.378051, -3.435973)][81.3]
1217919Q75717629Antonín GrunclCzechoslovak musicianMaleCzechoslovakiaArtist19051942.0capital punishment37.0['Slovakia'][(48.669026, 19.699024)][77.2]
1221308Q76161186Sara ChampionBritish archaeologistFemaleUnited KingdomResearcher19462000.0natural causes54.0['United Kingdom'][(55.378051, -3.435973)][81.3]
1222916Q76857201Dolores Millanesbaaŋa ŋun nyɛ paɣaFemaleSpainArtist18591906.0shipwrecking47.0['Spain'][(40.463667, -3.74922)][83.3]
1222958Q76995470David Hillhouse BuelUnion Army officerMaleUnited States of AmericaMilitary personnel18391870.0homicide31.0['United States'][(37.09024, -95.712891)][78.5]
\n", + "

44211 rows × 13 columns

\n", + "
" + ], + "text/plain": [ + " Id Name \\\n", + "0 Q23 George Washington \n", + "1 Q42 Douglas Adams \n", + "2 Q91 Abraham Lincoln \n", + "5 Q260 Jean-François Champollion \n", + "7 Q296 Claude Monet \n", + "... ... ... \n", + "1215502 Q75349931 John Saxton \n", + "1217919 Q75717629 Antonín Gruncl \n", + "1221308 Q76161186 Sara Champion \n", + "1222916 Q76857201 Dolores Millanes \n", + "1222958 Q76995470 David Hillhouse Buel \n", + "\n", + " Short description Gender \\\n", + "0 1st president of the United States (1732–1799) Male \n", + "1 English writer and humorist Male \n", + "2 16th president of the United States (1809-1865) Male \n", + "5 French classical scholar Male \n", + "7 French impressionist painter (1840-1926) Male \n", + "... ... ... \n", + "1215502 British diarist and settler in New Zealand Male \n", + "1217919 Czechoslovak musician Male \n", + "1221308 British archaeologist Female \n", + "1222916 baaŋa ŋun nyɛ paɣa Female \n", + "1222958 Union Army officer Male \n", + "\n", + " Country \\\n", + "0 United States of America; Kingdom of Great Bri... \n", + "1 United Kingdom \n", + "2 United States of America \n", + "5 Kingdom of France; First French Empire \n", + "7 France \n", + "... ... \n", + "1215502 United Kingdom of Great Britain and Ireland \n", + "1217919 Czechoslovakia \n", + "1221308 United Kingdom \n", + "1222916 Spain \n", + "1222958 United States of America \n", + "\n", + " Occupation Birth year Death year Manner of death \\\n", + "0 Politician 1732 1799.0 natural causes \n", + "1 Artist 1952 2001.0 natural causes \n", + "2 Politician 1809 1865.0 homicide \n", + "5 Egyptologist 1790 1832.0 natural causes \n", + "7 Artist 1840 1926.0 natural causes \n", + "... ... ... ... ... \n", + "1215502 Settler 1807 1866.0 suicide \n", + "1217919 Artist 1905 1942.0 capital punishment \n", + "1221308 Researcher 1946 2000.0 natural causes \n", + "1222916 Artist 1859 1906.0 shipwrecking \n", + "1222958 Military personnel 1839 1870.0 homicide \n", + "\n", + " Age of death Associated Countries \\\n", + "0 67.0 ['United Kingdom', 'United States'] \n", + "1 49.0 ['United Kingdom'] \n", + "2 56.0 ['United States'] \n", + "5 42.0 ['France'] \n", + "7 86.0 ['France'] \n", + "... ... ... \n", + "1215502 59.0 ['United Kingdom'] \n", + "1217919 37.0 ['Slovakia'] \n", + "1221308 54.0 ['United Kingdom'] \n", + "1222916 47.0 ['Spain'] \n", + "1222958 31.0 ['United States'] \n", + "\n", + " Associated Country Coordinates (Lat/Lon) \\\n", + "0 [(55.378051, -3.435973), (37.09024, -95.712891)] \n", + "1 [(55.378051, -3.435973)] \n", + "2 [(37.09024, -95.712891)] \n", + "5 [(46.227638, 2.213749)] \n", + "7 [(46.227638, 2.213749)] \n", + "... ... \n", + "1215502 [(55.378051, -3.435973)] \n", + "1217919 [(48.669026, 19.699024)] \n", + "1221308 [(55.378051, -3.435973)] \n", + "1222916 [(40.463667, -3.74922)] \n", + "1222958 [(37.09024, -95.712891)] \n", + "\n", + " Associated Country Life Expectancy \n", + "0 [81.3, 78.5] \n", + "1 [81.3] \n", + "2 [78.5] \n", + "5 [82.5] \n", + "7 [82.5] \n", + "... ... 
\n", + "1215502 [81.3] \n", + "1217919 [77.2] \n", + "1221308 [81.3] \n", + "1222916 [83.3] \n", + "1222958 [78.5] \n", + "\n", + "[44211 rows x 13 columns]" + ] + }, + "execution_count": 24, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "a.data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5aff9d42-9241-48f1-85c6-8a0dbc4ab83a", + "metadata": {}, "outputs": [], "source": [] } diff --git a/notebooks/scenarios/getting-started/05-adding-users.ipynb b/notebooks/scenarios/getting-started/05-adding-users.ipynb index 881fb6a5a01..6e6befee17b 100644 --- a/notebooks/scenarios/getting-started/05-adding-users.ipynb +++ b/notebooks/scenarios/getting-started/05-adding-users.ipynb @@ -14,9 +14,16101 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, + "id": "3de6bc42-4b2f-4902-9676-a36a0621fef3", + "metadata": {}, + "outputs": [], + "source": [ + "# syft absolute\n", + "import syft as sy" + ] + }, + { + "cell_type": "code", + "execution_count": 3, "id": "3ac0c932-601f-4424-ad80-f02da291b2a3", "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Starting test-datasite-1 server on 0.0.0.0:8081\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO: Started server process [63035]\n", + "INFO: Waiting for application startup.\n", + "INFO: Application startup complete.\n", + "INFO: Uvicorn running on http://0.0.0.0:8081 (Press CTRL+C to quit)\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO: 127.0.0.1:55426 - \"GET /api/v2/metadata HTTP/1.1\" 200 OK\n", + "Waiting for server to start Done.\n" + ] + }, + { + "data": { + "text/html": [ + "
SyftInfo:
You have launched a development server at http://0.0.0.0:8081. It is intended only for local use.

" + ], + "text/plain": [ + "SyftInfo: You have launched a development server at http://0.0.0.0:8081.It is intended only for local use." + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO: 127.0.0.1:55432 - \"GET /api/v2/metadata HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55432 - \"POST /api/v2/login HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55432 - \"GET /api/v2/api?verify_key=d2dfbeeef378bf4737a4aec6a9406cd8a674a7831ec56f4ca8899e52e9c827e3&communication_protocol=dev HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55442 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55432 - \"POST /api/v2/register HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55456 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55472 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55474 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55490 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55500 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55516 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55518 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55534 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55542 - \"GET /api/v2/metadata HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55542 - \"POST /api/v2/login HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:55542 - \"GET /api/v2/api?verify_key=d52fec1ad30b0934cf021e3925482276433f83c5943d5590a5298f4ce77fe8a5&communication_protocol=dev HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:40602 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:40604 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:40620 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:40632 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:40642 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:40652 - \"GET /api/v2/metadata HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:40652 - \"POST /api/v2/register HTTP/1.1\" 200 OK\n", + "INFO: 127.0.0.1:40662 - \"POST /api/v2/api_call HTTP/1.1\" 200 OK\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO: Shutting down\n", + "INFO: Waiting for application shutdown.\n", + "INFO: Application shutdown complete.\n", + "INFO: Finished server process [63035]\n" + ] + } + ], + "source": [ + "server = sy.orchestra.launch(name=\"test-datasite-1\", port=8081)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "5ef99d40-8b1e-4be8-af42-0edb50df7250", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Logged into as GUEST\n", + "Logged into as \n" + ] + }, + { + "data": { + "text/html": [ + "
SyftWarning:
You are using a default password. Please change the password using `[your_client].me.set_password([new_password])`.

" + ], + "text/plain": [ + "SyftWarning: You are using a default password. Please change the password using `[your_client].me.set_password([new_password])`." + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# logging in as root client with default credentials\n", + "client = server.login(email=\"info@openmined.org\", password=\"changethis\")" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "d970eaef-e27e-4d81-b959-da18a0af2f01", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
SyftSuccess:
User 'John Doe' successfully registered! To see users, run `[your_client].users`

" + ], + "text/plain": [ + "SyftSuccess: User 'John Doe' successfully registered! To see users, run `[your_client].users`" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# register a new user\n", + "client.register(\n", + " name=\"John Doe\", email=\"john@email.com\", password=\"pass\", password_verify=\"pass\"\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "fd8f9701-1fe8-4a96-9d49-7ecb1a29adab", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "
\n", + "
\n", + " \n", + "
\n", + "

UserView List

\n", + "
\n", + "
\n", + "
\n", + " \n", + "
\n", + "
\n", + "

Total: 0

\n", + "
\n", + "
\n", + "
\n", + "\n", + "\n", + "\n", + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Returns a list of all the existing users in the domain\n", + "client.users" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "db6af0e7-dceb-48ae-887d-614c41a38797", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'John Doe'" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Selecting an user by index\n", + "client.users[1].name" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "74bc93da-0c86-4397-9a2f-fb4aa414ad3a", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "
\n", + "
\n", + " \n", + "
\n", + "

UserView List

\n", + "
\n", + "
\n", + "
\n", + " \n", + "
\n", + "
\n", + "

Total: 0

\n", + "
\n", + "
\n", + "
\n", + "\n", + "\n", + "\n", + "" + ], + "text/plain": [ + "[syft.service.user.user.UserView]" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Selecting an user by filtering\n", + "search_results = client.users.search(email=\"john@email.com\")\n", + "search_results" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "68d7a572-b90c-44a1-9ae8-e41db39f8f23", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'John Doe'" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "searched_user = search_results[0]\n", + "searched_user.name" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "859f20c6-21d2-4448-8f3b-097e6df98b90", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
SyftSuccess:
Successfully updated email for the user 'John Doe' to 'updatedjohn@email.com'.

" + ], + "text/plain": [ + "SyftSuccess: Successfully updated email for the user 'John Doe' to 'updatedjohn@email.com'." + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# update email\n", + "searched_user.set_email(email=\"updatedjohn@email.com\")" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "ef702a91-0674-49ba-80bc-aaa66f83f434", + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "```python\n", + "class UserView:\n", + " id: str = e451c1c5c46e462dbc6e0484626559fc\n", + " name: str = \"John Doe\"\n", + " email: str = \"updatedjohn@email.com\"\n", + " institution: str = None\n", + " website: str = None\n", + " role: str = ServiceRole.DATA_SCIENTIST\n", + " notifications_enabled: str = {: True, : False, : False, : False}\n", + "\n", + "```" + ], + "text/plain": [ + "syft.service.user.user.UserView" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "searched_user" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "b8d5a650-2d4b-4fec-a45e-9f93f9e0f73e", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
SyftSuccess:
Successfully updated password for user 'John Doe' with email 'updatedjohn@email.com'.

" + ], + "text/plain": [ + "SyftSuccess: Successfully updated password for user 'John Doe' with email 'updatedjohn@email.com'." + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# update password\n", + "searched_user.set_password(new_password=\"newpass\", confirm=False)" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "83aba8a1-98a2-4dea-ae18-2f70f2df078e", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "
\n", + "
\n", + " \n", + "
\n", + "

UserView List

\n", + "
\n", + "
\n", + "
\n", + " \n", + "
\n", + "
\n", + "

Total: 0

\n", + "
\n", + "
\n", + "
\n", + "\n", + "\n", + "\n", + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "client.users" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "d4ca0ce8-4bb6-44a3-8e4b-6d89b91fd30d", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
SyftSuccess:
User details successfully updated.

" + ], + "text/plain": [ + "SyftSuccess: User details successfully updated." + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Update info of an existing user\n", + "searched_user.update(\n", + " name=\"Updated Jane Doe\",\n", + " institution=\"My institution\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "a4d7f854-3771-4dbd-94ad-ffa4a76272ee", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "
\n", + "
\n", + " \n", + "
\n", + "

UserView List

\n", + "
\n", + "
\n", + "
\n", + " \n", + "
\n", + "
\n", + "

Total: 0

\n", + "
\n", + "
\n", + "
\n", + "\n", + "\n", + "\n", + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "client.users" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "b0d7b4fc-eaf3-45bf-b808-6caf91d74ccd", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Logged into as GUEST\n", + "Logged into as \n" + ] + }, + { + "data": { + "text/html": [ + "\n", + " \n", + "
\n", + " \"Logo\"\n",\n", + "

Welcome to test-datasite-1

\n", + "
\n", + " URL: http://localhost:8081
\n", + " Server Description: This is the default description for a Datasite Server.
\n", + " Server Type: Datasite
\n", + " Server Side Type:High Side
\n", + " Syft Version: 0.8.7-beta.14
\n", + "\n", + "
\n", + "
\n", + " ⓘ \n", + " This datasite is run by the library PySyft to learn more about how it works visit\n", + " github.com/OpenMined/PySyft.\n", + "
\n", + "

Commands to Get Started

\n", + " \n", + "
    \n", + " \n", + "
  • <your_client>.datasets - list datasets\n", + "
  • <your_client>.code - list code\n", + "
  • <your_client>.projects - list projects\n", + "
\n", + " \n", + "

\n", + " " + ], + "text/plain": [ + "" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# validate login for new user\n", + "server.login(email=\"updatedjohn@email.com\", password=\"newpass\")" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "4b4c078c-663f-437e-b103-92f76602eee1", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
SyftSuccess:
ID: e451c1c5c46e462dbc6e0484626559fc deleted

" + ], + "text/plain": [ + "SyftSuccess: ID: e451c1c5c46e462dbc6e0484626559fc deleted" + ] + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Delete user\n", + "client.users.delete(searched_user.id)" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "3167816a-5efc-4ae5-be5d-389398b74e3c", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "
\n", + "
\n", + " \n", + "
\n", + "

UserView List

\n", + "
\n", + "
\n", + "
\n", + " \n", + "
\n", + "
\n", + "

Total: 0

\n", + "
\n", + "
\n", + "
\n", + "\n", + "\n", + "\n", + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 18, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "client.users" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "877d0945-0684-42b5-a5d4-4fd3e69f950b", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
[HTML output omitted: SyftSuccess: Registration feature successfully enabled]
" + ], + "text/plain": [ + "SyftSuccess: Registration feature successfully enabled" + ] + }, + "execution_count": 19, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Enable user registration\n", + "client.settings.allow_guest_signup(enable=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "c2260402-642f-41f5-8217-f0d44f75d62b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Logged into as GUEST\n", + "Logged into as GUEST\n" + ] + } + ], + "source": [ + "guest_user = server.login_as_guest()" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "fa874373-9289-4531-94cc-9342f1e244ff", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
[HTML output omitted: SyftSuccess: User 'Curious Scientist' successfully registered!]
" + ], + "text/plain": [ + "SyftSuccess: User 'Curious Scientist' successfully registered!" + ] + }, + "execution_count": 21, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# register the account\n", + "guest_user.register(\n", + " email=\"scientist@test.com\",\n", + " password=\"123\",\n", + " password_verify=\"123\",\n", + " name=\"Curious Scientist\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "id": "a2db0aee-a380-4bf2-85bf-65c2914d719a", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "
\n", + "
\n", + " \n", + "
\n", + "

UserView List

\n", + "
\n", + "
\n", + "
\n", + " \n", + "
\n", + "
\n", + "

Total: 0

\n", + "
\n", + "
\n", + "
\n", + "\n", + "\n", + "\n", + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 22, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "client.users" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "17749554-e971-401b-977f-6d1819b2194a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Stopping test-datasite-1\n" + ] + } + ], + "source": [ + "server.land()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f6f2c835-7a79-4331-9486-dfeb8acdfbf5", + "metadata": {}, "outputs": [], "source": [] } @@ -37,7 +16129,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.12.2" + "version": "3.11.5" } }, "nbformat": 4, diff --git a/notebooks/scenarios/reverse-tunnel/03-network-configuration.ipynb b/notebooks/scenarios/reverse-tunnel/03-network-configuration.ipynb index 477148a7c33..9946fac52c7 100644 --- a/notebooks/scenarios/reverse-tunnel/03-network-configuration.ipynb +++ b/notebooks/scenarios/reverse-tunnel/03-network-configuration.ipynb @@ -1,37 +1,37 @@ { - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "6296126b-2575-44b4-9f09-2d73f444ca96", - "metadata": {}, - "outputs": [], - "source": [ - "# -- ingress\n", - "# -- open port for rathole server\n", - "# -- nodeport vs websockets" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "6296126b-2575-44b4-9f09-2d73f444ca96", + "metadata": {}, + "outputs": [], + "source": [ + "# -- ingress\n", + "# -- open port for rathole server\n", + "# -- serverport vs websockets" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.2" + } }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 + "nbformat": 4, + "nbformat_minor": 5 } diff --git a/notebooks/scenarios/reverse-tunnel/04-setup-domain-with-tunnel.ipynb b/notebooks/scenarios/reverse-tunnel/04-setup-datasite-with-tunnel.ipynb similarity index 100% rename from notebooks/scenarios/reverse-tunnel/04-setup-domain-with-tunnel.ipynb rename to notebooks/scenarios/reverse-tunnel/04-setup-datasite-with-tunnel.ipynb diff --git a/notebooks/scenarios/reverse-tunnel/06-proxy-clients.ipynb b/notebooks/scenarios/reverse-tunnel/06-proxy-clients.ipynb index 536dce15404..3c8d070e544 100644 --- a/notebooks/scenarios/reverse-tunnel/06-proxy-clients.ipynb +++ b/notebooks/scenarios/reverse-tunnel/06-proxy-clients.ipynb @@ -1,36 +1,36 @@ { - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "1c3868bb-a140-451a-ac51-a4fd17bf2ab8", - "metadata": {}, - "outputs": [], - "source": [ - "# -- how to list domains on gateway\n", - "# -- getting a proxy client" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" 
+ "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "1c3868bb-a140-451a-ac51-a4fd17bf2ab8", + "metadata": {}, + "outputs": [], + "source": [ + "# -- how to list datasites on gateway\n", + "# -- getting a proxy client" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.2" + } }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 + "nbformat": 4, + "nbformat_minor": 5 } diff --git a/notebooks/tutorials/data-owner/01-uploading-private-data.ipynb b/notebooks/tutorials/data-owner/01-uploading-private-data.ipynb index 3a1863c0bdd..1803bc2257c 100644 --- a/notebooks/tutorials/data-owner/01-uploading-private-data.ipynb +++ b/notebooks/tutorials/data-owner/01-uploading-private-data.ipynb @@ -50,8 +50,8 @@ "metadata": {}, "outputs": [], "source": [ - "node = sy.orchestra.launch(\n", - " name=\"private-data-example-domain-1\", port=\"auto\", reset=True\n", + "server = sy.orchestra.launch(\n", + " name=\"private-data-example-datasite-1\", port=\"auto\", reset=True\n", ")" ] }, @@ -82,7 +82,7 @@ "source": [ "# syft absolute\n", "\n", - "client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -322,9 +322,9 @@ "metadata": {}, "outputs": [], "source": [ - "# Cleanup local domain server\n", - "if node.node_type.value == \"python\":\n", - " node.land()" + "# Cleanup local datasite server\n", + "if server.server_type.value == \"python\":\n", + " server.land()" ] } ], diff --git a/notebooks/tutorials/data-owner/02-account-management.ipynb b/notebooks/tutorials/data-owner/02-account-management.ipynb index a4e64b74698..4f145816565 100644 --- a/notebooks/tutorials/data-owner/02-account-management.ipynb +++ b/notebooks/tutorials/data-owner/02-account-management.ipynb @@ -48,8 +48,8 @@ "metadata": {}, "outputs": [], "source": [ - "node = sy.orchestra.launch(\n", - " name=\"account-management-example-domain-1\", port=8041, reset=True\n", + "server = sy.orchestra.launch(\n", + " name=\"account-management-example-datasite-1\", port=8041, reset=True\n", ")" ] }, @@ -78,10 +78,8 @@ "source": [ "# syft absolute\n", "from syft.service.user.user import ServiceRole\n", - "from syft.service.user.user import UserCreate\n", - "from syft.service.user.user import UserUpdate\n", "\n", - "client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -107,9 +105,7 @@ "metadata": {}, "outputs": [], "source": [ - "client.users.create(\n", - " UserCreate(email=\"newuser@openmined.org\", name=\"John Doe\", password=\"pw\")\n", - ")" + "client.users.create(email=\"newuser@openmined.org\", name=\"John Doe\", password=\"pw\")" ] }, { @@ -209,7 +205,7 @@ "outputs": [], "source": [ "updated_user = client.users.update(\n", - " new_user.id, UserUpdate(role=ServiceRole.DATA_SCIENTIST, password=\"123\")\n", + " uid=new_user.id, 
role=ServiceRole.DATA_SCIENTIST, password=\"123\"\n", ")" ] }, @@ -238,7 +234,7 @@ "metadata": {}, "outputs": [], "source": [ - "ds_client = node.login(email=\"newuser@openmined.org\", password=\"123\")" + "ds_client = server.login(email=\"newuser@openmined.org\", password=\"123\")" ] }, { @@ -328,7 +324,7 @@ "metadata": {}, "outputs": [], "source": [ - "new_user = node.login(email=\"joker@test.com\", password=\"joker123\")" + "new_user = server.login(email=\"joker@test.com\", password=\"joker123\")" ] }, { @@ -482,7 +478,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.3" + "version": "3.11.5" }, "toc": { "base_numbering": 1, diff --git a/notebooks/tutorials/data-owner/03-messages-and-requests.ipynb b/notebooks/tutorials/data-owner/03-messages-and-requests.ipynb index c05d6f7d556..398862601f2 100644 --- a/notebooks/tutorials/data-owner/03-messages-and-requests.ipynb +++ b/notebooks/tutorials/data-owner/03-messages-and-requests.ipynb @@ -48,8 +48,8 @@ "metadata": {}, "outputs": [], "source": [ - "node = sy.orchestra.launch(\n", - " name=\"messages-requests-example-domain-1-do\", port=7021, reset=True\n", + "server = sy.orchestra.launch(\n", + " name=\"messages-requests-example-datasite-1-do\", port=7021, reset=True\n", ")" ] }, @@ -76,7 +76,7 @@ "metadata": {}, "outputs": [], "source": [ - "admin_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "admin_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -122,7 +122,7 @@ "metadata": {}, "outputs": [], "source": [ - "guest_client = node.client.login(email=\"jane@caltech.edu\", password=\"abc123\")" + "guest_client = server.client.login(email=\"jane@caltech.edu\", password=\"abc123\")" ] }, { diff --git a/notebooks/tutorials/data-owner/05-syft-services-api.ipynb b/notebooks/tutorials/data-owner/05-syft-services-api.ipynb index 7c3f409105a..760ec6c481d 100644 --- a/notebooks/tutorials/data-owner/05-syft-services-api.ipynb +++ b/notebooks/tutorials/data-owner/05-syft-services-api.ipynb @@ -56,8 +56,8 @@ "metadata": {}, "outputs": [], "source": [ - "node = sy.orchestra.launch(\n", - " name=\"services-api-example-domain-1\", port=\"auto\", reset=True\n", + "server = sy.orchestra.launch(\n", + " name=\"services-api-example-datasite-1\", port=\"auto\", reset=True\n", ")" ] }, @@ -78,7 +78,7 @@ "source": [ "# syft absolute\n", "\n", - "client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { diff --git a/notebooks/tutorials/data-scientist/02-finding-datasets.ipynb b/notebooks/tutorials/data-scientist/02-finding-datasets.ipynb index 30dcf080d8c..bfae6dbdb2d 100644 --- a/notebooks/tutorials/data-scientist/02-finding-datasets.ipynb +++ b/notebooks/tutorials/data-scientist/02-finding-datasets.ipynb @@ -21,7 +21,7 @@ "id": "2", "metadata": {}, "source": [ - "## Connecting to a Domain" + "## Connecting to a Datasite" ] }, { diff --git a/notebooks/tutorials/data-scientist/03-working-with-private-datasets.ipynb b/notebooks/tutorials/data-scientist/03-working-with-private-datasets.ipynb index acf4ec170df..731123c88d4 100644 --- a/notebooks/tutorials/data-scientist/03-working-with-private-datasets.ipynb +++ b/notebooks/tutorials/data-scientist/03-working-with-private-datasets.ipynb @@ -48,8 +48,8 @@ "metadata": {}, "outputs": [], "source": [ - "node = sy.orchestra.launch(\n", - " name=\"private-datasets-example-domain-1\", port=8062, 
reset=True\n", + "server = sy.orchestra.launch(\n", + " name=\"private-datasets-example-datasite-1\", port=8062, reset=True\n", ")" ] }, @@ -76,7 +76,7 @@ "metadata": {}, "outputs": [], "source": [ - "root_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "root_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -147,7 +147,7 @@ "metadata": {}, "outputs": [], "source": [ - "guest_client = node.client.login(email=\"jane@caltech.edu\", password=\"abc123\")" + "guest_client = server.client.login(email=\"jane@caltech.edu\", password=\"abc123\")" ] }, { @@ -437,7 +437,7 @@ "metadata": {}, "outputs": [], "source": [ - "root_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "root_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { diff --git a/notebooks/tutorials/data-scientist/04-syft-functions.ipynb b/notebooks/tutorials/data-scientist/04-syft-functions.ipynb index cbee1755a3d..64812ef5333 100644 --- a/notebooks/tutorials/data-scientist/04-syft-functions.ipynb +++ b/notebooks/tutorials/data-scientist/04-syft-functions.ipynb @@ -48,8 +48,8 @@ "metadata": {}, "outputs": [], "source": [ - "node = sy.orchestra.launch(\n", - " name=\"syft-functions-example-domain-1\", port=7022, reset=True\n", + "server = sy.orchestra.launch(\n", + " name=\"syft-functions-example-datasite-1\", port=7022, reset=True\n", ")" ] }, @@ -78,7 +78,7 @@ "source": [ "# syft absolute\n", "\n", - "admin_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "admin_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -140,7 +140,7 @@ "metadata": {}, "outputs": [], "source": [ - "guest_client = node.client.login(email=\"jane@caltech.edu\", password=\"abc123\")" + "guest_client = server.client.login(email=\"jane@caltech.edu\", password=\"abc123\")" ] }, { @@ -252,7 +252,7 @@ "id": "23", "metadata": {}, "source": [ - "You have probably noticed that in the last example we also specified the output policy. Its purpose has to do with the release of information for a given function and controlling the parameters that this release comes with. For example, if a data owner and a data scientist agree on the content of a function run on a domain and on what private data that can be run on, their work might not be done yet. They might negotiate how many times that function can be run, whether or not the data scientist can have access or what happens before releasing the output (maybe we add some noise like in the case of differential privacy). At the moment we have policies that allow data scientist to ask for a certain amount of runs on function, but the ones you will find most often is `SingleExecutionExactOutput` that ask for a single use on a function. We have used it so much that we came with the `syft_function_single_use` decorator that use by default that output policy. What is also cool is that you can pass the input for an input policy to this decorator to get a shorter version like this:" + "You have probably noticed that in the last example we also specified the output policy. Its purpose has to do with the release of information for a given function and controlling the parameters that this release comes with. For example, if a data owner and a data scientist agree on the content of a function run on a datasite and on what private data that can be run on, their work might not be done yet. 
They might negotiate how many times that function can be run, whether or not the data scientist can have access or what happens before releasing the output (maybe we add some noise like in the case of differential privacy). At the moment we have policies that allow data scientist to ask for a certain amount of runs on function, but the ones you will find most often is `SingleExecutionExactOutput` that ask for a single use on a function. We have used it so much that we came with the `syft_function_single_use` decorator that use by default that output policy. What is also cool is that you can pass the input for an input policy to this decorator to get a shorter version like this:" ] }, { @@ -435,7 +435,7 @@ "id": "42", "metadata": {}, "source": [ - "Right! Our code was not approved, so we should wait for the review from the data owner. As we also deployed the domain, we will do that quickly here, but for more details on what is happening check the data owner sections under tutorials:" + "Right! Our code was not approved, so we should wait for the review from the data owner. As we also deployed the datasite, we will do that quickly here, but for more details on what is happening check the data owner sections under tutorials:" ] }, { @@ -509,7 +509,7 @@ "id": "50", "metadata": {}, "source": [ - "Notice that the result we see is still `1.0` which looks like the result on the mock data. That is because it actually is! The object returned is an `ActionObject` which here behaves like a pointer for the data on the domain:" + "Notice that the result we see is still `1.0` which looks like the result on the mock data. That is because it actually is! The object returned is an `ActionObject` which here behaves like a pointer for the data on the datasite:" ] }, { diff --git a/notebooks/tutorials/data-scientist/05-messaging-and-requests.ipynb b/notebooks/tutorials/data-scientist/05-messaging-and-requests.ipynb index 5d7ff62fa94..fa8cfc445ba 100644 --- a/notebooks/tutorials/data-scientist/05-messaging-and-requests.ipynb +++ b/notebooks/tutorials/data-scientist/05-messaging-and-requests.ipynb @@ -48,8 +48,8 @@ "metadata": {}, "outputs": [], "source": [ - "node = sy.orchestra.launch(\n", - " name=\"messages-requests-example-domain-1-ds\", port=7023, reset=True\n", + "server = sy.orchestra.launch(\n", + " name=\"messages-requests-example-datasite-1-ds\", port=7023, reset=True\n", ")" ] }, @@ -76,7 +76,7 @@ "metadata": {}, "outputs": [], "source": [ - "admin_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "admin_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -122,7 +122,7 @@ "metadata": {}, "outputs": [], "source": [ - "guest_client = node.client.login(email=\"jane@caltech.edu\", password=\"abc123\")" + "guest_client = server.client.login(email=\"jane@caltech.edu\", password=\"abc123\")" ] }, { diff --git a/notebooks/tutorials/deployments/01-deploy-python.ipynb b/notebooks/tutorials/deployments/01-deploy-python.ipynb index cd80e629126..0dedeb13ea5 100644 --- a/notebooks/tutorials/deployments/01-deploy-python.ipynb +++ b/notebooks/tutorials/deployments/01-deploy-python.ipynb @@ -59,9 +59,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Working with Python Domain\n", + "## Working with Python Datasite\n", "\n", - "`PySyft` makes it very easy to develop against a domain in a notebook by providing the `sy.orchestra` interface. 
It allows you to start a domain with a webserver in a notebook in the background, which is a lightweight version of a Domain that would be used in production. You can specify options such as what kind of database you are using, whether you want to use networking and how many processes you want to use. You can launch a Domain by simply executing:" + "`PySyft` makes it very easy to develop against a datasite in a notebook by providing the `sy.orchestra` interface. It allows you to start a datasite with a webserver in a notebook in the background, which is a lightweight version of a Datasite that would be used in production. You can specify options such as what kind of database you are using, whether you want to use networking and how many processes you want to use. You can launch a Datasite by simply executing:" ] }, { @@ -80,8 +80,8 @@ "metadata": {}, "outputs": [], "source": [ - "node = sy.orchestra.launch(\n", - " name=\"dev-mode-example-domain-1\", port=8020, reset=True, dev_mode=True\n", + "server = sy.orchestra.launch(\n", + " name=\"dev-mode-example-datasite-1\", port=8020, reset=True, dev_mode=True\n", ")" ] }, @@ -92,12 +92,12 @@ "If we don't need a webserver (for development this is true in many cases), we can omit the port and instead use\n", "\n", "```python\n", - "node = sy.orchestra.launch(name=\"dev-mode-example-domain-1\", dev_mode=True, reset=True)\n", + "server = sy.orchestra.launch(name=\"dev-mode-example-datasite-1\", dev_mode=True, reset=True)\n", "```\n", "\n", "One of the benefits of not using a port is that you can use a debugger and set breakpoints within api calls. This makes debugging way faster in many cases.\n", "\n", - "Now, we are ready to start using the domain. The domain comes with test login credentials for the admin." + "Now, we are ready to start using the datasite. The datasite comes with test login credentials for the admin." ] }, { @@ -106,14 +106,14 @@ "metadata": {}, "outputs": [], "source": [ - "client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Once you are logged in, you are ready to start using the domain, for instance for creating a dataset (this one is empty, just as a example)." + "Once you are logged in, you are ready to start using the datasite, for instance for creating a dataset (this one is empty, just as a example)." 
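For reference, here is a minimal sketch of what such a dataset upload can look like, using the `sy.Dataset` / `sy.Asset` pattern from the pandas-cookbook tutorials; the dataset name and data below are purely illustrative:

```python
# Illustrative only: wrap a tiny mock/private pair in a Syft dataset and upload it.
import pandas as pd
import syft as sy

private_df = pd.DataFrame({"value": [1, 2, 3]})
mock_df = pd.DataFrame({"value": [0, 0, 0]})

dataset = sy.Dataset(
    name="example-dataset",
    asset_list=[
        sy.Asset(name="example-asset", data=private_df, mock=mock_df, mock_is_real=False)
    ],
)
client.upload_dataset(dataset)
```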
] }, { @@ -130,7 +130,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Lastly to stop or terminate your Domain, we can execute the following command:" + "Lastly to stop or terminate your Datasite, we can execute the following command:" ] }, { @@ -139,7 +139,7 @@ "metadata": {}, "outputs": [], "source": [ - "node.land()" + "server.land()" ] }, { @@ -157,7 +157,7 @@ "- [04-pytorch-example.ipynb](../../api/0.8/04-pytorch-example.ipynb)\n", "- [05-custom-policy.ipynb](../../api/0.8/05-custom-policy.ipynb)\n", "- [06-multiple-code-requests.ipynb](../../api/0.8/06-multiple-code-requests.ipynb)\n", - "- [07-domain-register-control-flow.ipynb](../../api/0.8/07-domain-register-control-flow.ipynb)\n", + "- [07-datasite-register-control-flow.ipynb](../../api/0.8/07-datasite-register-control-flow.ipynb)\n", "- [08-code-version.ipynb](../../api/0.8/08-code-version.ipynb)\n", "- [09-blob-storage.ipynb](../../api/0.8/09-blob-storage.ipynb)\n", "- [10-container-images.ipynb](../../api/0.8/10-container-images.ipynb)\n", diff --git a/notebooks/tutorials/deployments/02-deploy-container.ipynb b/notebooks/tutorials/deployments/02-deploy-container.ipynb index a8717135d3f..6c8b5bbfdb8 100644 --- a/notebooks/tutorials/deployments/02-deploy-container.ipynb +++ b/notebooks/tutorials/deployments/02-deploy-container.ipynb @@ -58,14 +58,14 @@ "source": [ "``` bash\n", "docker run -it \\\n", - " -e NODE_NAME=syft-example-domain-1 \\\n", - " -e NODE_TYPE=domain \\\n", + " -e SERVER_NAME=syft-example-datasite-1 \\\n", + " -e SERVER_TYPE=datasite \\\n", " -e N_CONSUMERS=1 \\\n", " -e SINGLE_CONTAINER_MODE=true \\\n", " -e CREATE_PRODUCER=true \\\n", " -e INMEMORY_WORKERS=true \\\n", " -p 8080:80 --add-host=host.docker.internal:host-gateway \\\n", - " --name syft-example-domain-1 openmined/grid-backend:$SYFT_VERSION\n", + " --name syft-example-datasite-1 openmined/syft-backend:$SYFT_VERSION\n", "```" ] }, @@ -75,7 +75,7 @@ "source": [ "## Working with the single container deployment\n", "\n", - "PySyft makes it very simple to connect to any existing Syft cluster by providing the `sy.orchestra` interface. You can connect to the domain by executing these steps in your jupyter notebook:" + "PySyft makes it very simple to connect to any existing Syft cluster by providing the `sy.orchestra` interface. You can connect to the datasite by executing these steps in your jupyter notebook:" ] }, { @@ -86,7 +86,7 @@ "# syft absolute\n", "import syft as sy\n", "\n", - "node = sy.orchestra.launch(name=\"syft-example-domain-1\", deploy_to=\"remote\")\n", + "server = sy.orchestra.launch(name=\"syft-example-datasite-1\", deploy_to=\"remote\")\n", "```" ] }, @@ -94,12 +94,12 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "This will return a node handle by connecting to `http://localhost:8080` which is the default host and port where your docker container will be running. You can connect to a different host and port by setting the environment variables `NODE_URL` and `NODE_PORT`.\n", + "This will return a server handle by connecting to `http://localhost:8080` which is the default host and port where your docker container will be running. 
You can connect to a different host and port by setting the environment variables `SERVER_URL` and `SERVER_PORT`.\n", "```python\n", "import os\n", "\n", - "os.environ[\"NODE_URL\"] = \"\"\n", - "os.environ[\"NODE_PORT\"] = \"\"\n", + "os.environ[\"SERVER_URL\"] = \"\"\n", + "os.environ[\"SERVER_PORT\"] = \"\"\n", "```" ] }, @@ -107,7 +107,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Now, we are ready to start using the domain. The domain comes with default login credentials for the admin." + "Now, we are ready to start using the datasite. The datasite comes with default login credentials for the admin." ] }, { @@ -115,7 +115,7 @@ "metadata": {}, "source": [ "```python3\n", - "client = node.login(email=\"info@openmined.org\", password=\"changethis\")\n", + "client = server.login(email=\"info@openmined.org\", password=\"changethis\")\n", "```" ] }, @@ -123,7 +123,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Once you are logged in, you are ready to start using the domain, for instance for creating a dataset (this one is empty, just as a example)." + "Once you are logged in, you are ready to start using the datasite, for instance for creating a dataset (this one is empty, just as a example)." ] }, { @@ -151,7 +151,7 @@ "- [04-pytorch-example.ipynb](../../api/0.8/04-pytorch-example.ipynb)\n", "- [05-custom-policy.ipynb](../../api/0.8/05-custom-policy.ipynb)\n", "- [06-multiple-code-requests.ipynb](../../api/0.8/06-multiple-code-requests.ipynb)\n", - "- [07-domain-register-control-flow.ipynb](../../api/0.8/07-domain-register-control-flow.ipynb)\n", + "- [07-datasite-register-control-flow.ipynb](../../api/0.8/07-datasite-register-control-flow.ipynb)\n", "- [08-code-version.ipynb](../../api/0.8/08-code-version.ipynb)\n", "- [09-blob-storage.ipynb](../../api/0.8/09-blob-storage.ipynb)\n", "- [10-container-images.ipynb](../../api/0.8/10-container-images.ipynb)\n", diff --git a/notebooks/tutorials/deployments/03-deploy-k8s-k3d.ipynb b/notebooks/tutorials/deployments/03-deploy-k8s-k3d.ipynb index 3ba644ee5dc..c64e6c40f4a 100644 --- a/notebooks/tutorials/deployments/03-deploy-k8s-k3d.ipynb +++ b/notebooks/tutorials/deployments/03-deploy-k8s-k3d.ipynb @@ -78,7 +78,7 @@ "If you want to deploy your Kubernetes cluster in a resource-constrained environment, use the following flags to override the default configurations. Please note that you will need at least 1 CPU and 2 GB of RAM on Docker, and some tests may not work in such low-resource environments:\n", "\n", "```sh\n", - "helm install my-syft openmined/syft --version $SYFT_VERSION --namespace syft --create-namespace --set ingress.className=\"traefik\" --set node.resourcesPreset=null --set seaweedfs.resourcesPreset=null --set mongo.resourcesPreset=null --set registry.resourcesPreset=null --set proxy.resourcesPreset=null --set frontend.resourcesPreset=null\n", + "helm install my-syft openmined/syft --version $SYFT_VERSION --namespace syft --create-namespace --set ingress.className=\"traefik\" --set server.resourcesPreset=null --set seaweedfs.resourcesPreset=null --set mongo.resourcesPreset=null --set registry.resourcesPreset=null --set proxy.resourcesPreset=null --set frontend.resourcesPreset=null\n", "```\n", "\n", "
\n", @@ -89,7 +89,7 @@ "If you would like to set your own default password even for the production style deployment, use the following command:\n", "\n", "```sh\n", - "helm install my-syft openmined/syft --version $SYFT_VERSION --namespace syft --create-namespace --set ingress.className=\"traefik\" --set global.randomizedSecrets=false --set node.secret.defaultRootPassword=\"changethis\" --set seaweedfs.secret.s3RootPassword=\"admin\" --set mongo.secret.rootPassword=\"example\"\n", + "helm install my-syft openmined/syft --version $SYFT_VERSION --namespace syft --create-namespace --set ingress.className=\"traefik\" --set global.randomizedSecrets=false --set server.secret.defaultRootPassword=\"changethis\" --set seaweedfs.secret.s3RootPassword=\"admin\" --set mongo.secret.rootPassword=\"example\"\n", "```\n", "\n" ] @@ -104,7 +104,7 @@ "source": [ "## Working with the local Kubernetes deployment\n", "\n", - "PySyft makes it very simple to connect to your existing Syft cluster by providing the `sy.orchestra` interface. You can connect to the domain by executing these steps in your jupyter notebook:" + "PySyft makes it very simple to connect to your existing Syft cluster by providing the `sy.orchestra` interface. You can connect to the datasite by executing these steps in your jupyter notebook:" ] }, { @@ -115,7 +115,7 @@ "# syft absolute\n", "import syft as sy\n", "\n", - "node = sy.orchestra.launch(name=\"syft-example-domain-1\", deploy_to=\"remote\")\n", + "server = sy.orchestra.launch(name=\"syft-example-datasite-1\", deploy_to=\"remote\")\n", "```" ] }, @@ -123,13 +123,13 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "This will return a node handle by connecting to `http://localhost:8080` which is the default host and port where your kubernetes cluster will be running. You can connect to a different host and port by setting the environment variables `NODE_URL` and `NODE_PORT`.\n", + "This will return a server handle by connecting to `http://localhost:8080` which is the default host and port where your kubernetes cluster will be running. You can connect to a different host and port by setting the environment variables `SERVER_URL` and `SERVER_PORT`.\n", "\n", "```python\n", "import os\n", "\n", - "os.environ[\"NODE_URL\"] = \"\"\n", - "os.environ[\"NODE_PORT\"] = \"\"\n", + "os.environ[\"SERVER_URL\"] = \"\"\n", + "os.environ[\"SERVER_PORT\"] = \"\"\n", "```" ] }, @@ -137,7 +137,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Now, we are ready to start using the domain. Since helm is a product grade deployement stack, the domain comes with a randomized password for the default email credentials for the admin. Either run with Step 5 with your custom password or to extract the randomized password using `kubectl`, run the following command (in case you use a custom cluster name in step 1, replace `--context=k3d-$CLUSTER_NAME` appropriately): \n", + "Now, we are ready to start using the datasite. Since helm is a product grade deployement stack, the datasite comes with a randomized password for the default email credentials for the admin. 
Either run with Step 5 with your custom password or to extract the randomized password using `kubectl`, run the following command (in case you use a custom cluster name in step 1, replace `--context=k3d-$CLUSTER_NAME` appropriately): \n", "\n", "```sh\n", "kubectl --context=k3d-syft get secret backend-secret -n syft -o jsonpath='{.data.defaultRootPassword}' | base64 --decode\n", @@ -156,7 +156,7 @@ "metadata": {}, "source": [ "```python\n", - "client = node.login(email=\"info@openmined.org\", password=\"changethis\")\n", + "client = server.login(email=\"info@openmined.org\", password=\"changethis\")\n", "```" ] }, @@ -164,7 +164,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Once you are logged in, you are ready to start using the domain, for instance for creating a dataset (this one is empty, just as a example)." + "Once you are logged in, you are ready to start using the datasite, for instance for creating a dataset (this one is empty, just as a example)." ] }, { @@ -192,7 +192,7 @@ "- [04-pytorch-example.ipynb](../../api/0.8/04-pytorch-example.ipynb)\n", "- [05-custom-policy.ipynb](../../api/0.8/05-custom-policy.ipynb)\n", "- [06-multiple-code-requests.ipynb](../../api/0.8/06-multiple-code-requests.ipynb)\n", - "- [07-domain-register-control-flow.ipynb](../../api/0.8/07-domain-register-control-flow.ipynb)\n", + "- [07-datasite-register-control-flow.ipynb](../../api/0.8/07-datasite-register-control-flow.ipynb)\n", "- [08-code-version.ipynb](../../api/0.8/08-code-version.ipynb)\n", "- [09-blob-storage.ipynb](../../api/0.8/09-blob-storage.ipynb)\n", "- [10-container-images.ipynb](../../api/0.8/10-container-images.ipynb)\n", diff --git a/notebooks/tutorials/deployments/07-deploy-devspace.ipynb b/notebooks/tutorials/deployments/07-deploy-devspace.ipynb index 29632c99dec..1033814036b 100644 --- a/notebooks/tutorials/deployments/07-deploy-devspace.ipynb +++ b/notebooks/tutorials/deployments/07-deploy-devspace.ipynb @@ -82,12 +82,12 @@ "You can use the command `k9s` or `lazydocker` (will require installing lazydocker) to view your cluster.\n", "\n", "\n", - "#### Launching Different Node Types\n", - "Alternatively, you can also use the following commands to launch different node types:\n", - "* Launching a Domain: `CLUSTER_NAME=testdomain1 CLUSTER_HTTP_PORT=9082 tox -e dev.k8s.launch.domain`\n", + "#### Launching Different Server Types\n", + "Alternatively, you can also use the following commands to launch different server types:\n", + "* Launching a Datasite: `CLUSTER_NAME=testdatasite1 CLUSTER_HTTP_PORT=9082 tox -e dev.k8s.launch.datasite`\n", "* Launching a Gateway: `CLUSTER_NAME=testgateway1 CLUSTER_HTTP_PORT=9081 tox -e dev.k8s.launch.gateway`\n", "* Launching a Enclave: `CLUSTER_NAME=testenclave1 CLUSTER_HTTP_PORT=9083 tox -e dev.k8s.launch.enclave`\n", - "* Launching Nodes with `hotreload` using tox posargs: `tox -e dev.k8s.launch.domain -- hotreload`" + "* Launching Servers with `hotreload` using tox posargs: `tox -e dev.k8s.launch.datasite -- hotreload`" ] }, { @@ -105,7 +105,7 @@ "- [04-jax-example.ipynb](../../api/0.8/04-jax-example.ipynb)\n", "- [05-custom-policy.ipynb](../../api/0.8/05-custom-policy.ipynb)\n", "- [06-multiple-code-requests.ipynb](../../api/0.8/06-multiple-code-requests.ipynb)\n", - "- [07-domain-register-control-flow.ipynb](../../api/0.8/07-domain-register-control-flow.ipynb)\n", + "- [07-datasite-register-control-flow.ipynb](../../api/0.8/07-datasite-register-control-flow.ipynb)\n", "- [08-code-version.ipynb](../../api/0.8/08-code-version.ipynb)\n", 
"- [09-blob-storage.ipynb](../../api/0.8/09-blob-storage.ipynb)\n", "- [10-container-images.ipynb](../../api/0.8/10-container-images.ipynb)\n", diff --git a/notebooks/tutorials/hello-syft/01-hello-syft.ipynb b/notebooks/tutorials/hello-syft/01-hello-syft.ipynb index e51018e2be0..b710b59a92f 100644 --- a/notebooks/tutorials/hello-syft/01-hello-syft.ipynb +++ b/notebooks/tutorials/hello-syft/01-hello-syft.ipynb @@ -93,9 +93,9 @@ "metadata": {}, "outputs": [], "source": [ - "node = sy.orchestra.launch(name=\"hello-syft-usa-server\", port=9000, reset=True)\n", - "root_domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")\n", - "root_domain_client.register(\n", + "server = sy.orchestra.launch(name=\"hello-syft-usa-server\", port=9000, reset=True)\n", + "root_datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")\n", + "root_datasite_client.register(\n", " name=\"Jane Doe\",\n", " email=\"janedoe@caltech.edu\",\n", " password=\"abc123\",\n", @@ -104,7 +104,7 @@ " website=\"https://www.caltech.edu/\",\n", ")\n", "\n", - "ds_client = node.login(email=\"janedoe@caltech.edu\", password=\"abc123\")" + "ds_client = server.login(email=\"janedoe@caltech.edu\", password=\"abc123\")" ] }, { @@ -114,7 +114,7 @@ "source": [ "## Data owner - Part 1\n", "\n", - "### Upload Data to Domain" + "### Upload Data to Datasite" ] }, { @@ -162,7 +162,7 @@ " )\n", " ],\n", ")\n", - "root_domain_client.upload_dataset(dataset)" + "root_datasite_client.upload_dataset(dataset)" ] }, { @@ -319,7 +319,7 @@ "metadata": {}, "outputs": [], "source": [ - "root_domain_client.requests" + "root_datasite_client.requests" ] }, { @@ -329,7 +329,7 @@ "metadata": {}, "outputs": [], "source": [ - "request = root_domain_client.requests[0]" + "request = root_datasite_client.requests[0]" ] }, { @@ -578,7 +578,7 @@ "metadata": {}, "outputs": [], "source": [ - "node.land()" + "server.land()" ] } ], diff --git a/notebooks/tutorials/model-auditing/colab/01-user-log.ipynb b/notebooks/tutorials/model-auditing/colab/01-user-log.ipynb index 6e1e438c04e..1fd95c66ebb 100644 --- a/notebooks/tutorials/model-auditing/colab/01-user-log.ipynb +++ b/notebooks/tutorials/model-auditing/colab/01-user-log.ipynb @@ -13,7 +13,7 @@ "id": "1", "metadata": {}, "source": [ - "In this tutorial, we show how external parties can audit internal AI systems without accessing them — mitigating privacy, security, and IP costs and risks. **This tutorial uses syft 0.8.2.b0, with a domain setup that does not use networking, to run the tutorial with networking read more in section 1.1.1**\n", + "In this tutorial, we show how external parties can audit internal AI systems without accessing them — mitigating privacy, security, and IP costs and risks. **This tutorial uses syft 0.8.2.b0, with a datasite setup that does not use networking, to run the tutorial with networking read more in section 1.1.1**\n", "\n", "You can read more about this tutorial and the follow up tutorials here on the [blog post](https://blog.openmined.org/)." ] @@ -70,7 +70,7 @@ "id": "6", "metadata": {}, "source": [ - "### Launch PySyft domain server" + "### Launch PySyft datasite server" ] }, { @@ -78,7 +78,7 @@ "id": "7", "metadata": {}, "source": [ - "To start we launch a `PySyft` domain server. This is the backend that stores the private data." + "To start we launch a `PySyft` datasite server. This is the backend that stores the private data." 
] }, { @@ -90,7 +90,7 @@ }, "outputs": [], "source": [ - "node = sy.orchestra.launch(name=\"syft-domain\", reset=True)" + "server = sy.orchestra.launch(name=\"syft-datasite\", reset=True)" ] }, { @@ -98,15 +98,15 @@ "id": "9", "metadata": {}, "source": [ - "There are 3 ways to launch a `PySyft` domain\n", + "There are 3 ways to launch a `PySyft` datasite\n", "\n", "**A) From a notebook, with simulated networking \\*\\*THIS NOTEBOOK\\*\\***\n", " - Apart from the network calls, this uses exactly the same code as other setups\n", - " - run orchestra **without a port**: `sy.orchestra.launch(name=\"syft-domain\")`\n", + " - run orchestra **without a port**: `sy.orchestra.launch(name=\"syft-datasite\")`\n", " \n", "**B) From a notebook with networking (also supports docker)**\n", " - This spawns a separate process that starts a uvicorn webserver\n", - " - run orchestra **with a port**:`sy.orchestra.launch(name=\"syft-domain\", port=8080)`\n", + " - run orchestra **with a port**:`sy.orchestra.launch(name=\"syft-datasite\", port=8080)`\n", " \n", "**C) From the command line (supports docker/kubernetes)**\n", " - setup for production\n", @@ -129,7 +129,7 @@ "id": "11", "metadata": {}, "source": [ - "We can now login to our domain using the default admin credentials. In production we would change these." + "We can now login to our datasite using the default admin credentials. In production we would change these." ] }, { @@ -141,7 +141,7 @@ }, "outputs": [], "source": [ - "mo_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "mo_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -149,7 +149,7 @@ "id": "13", "metadata": {}, "source": [ - "### Configure node to allow user registration" + "### Configure server to allow user registration" ] }, { @@ -157,7 +157,7 @@ "id": "14", "metadata": {}, "source": [ - "For this tutorial we allow other users to create their own account. New accounts will get limited permissions and will only be able to see the mock version of any datasets we upload to the domain." + "For this tutorial we allow other users to create their own account. New accounts will get limited permissions and will only be able to see the mock version of any datasets we upload to the datasite." ] }, { @@ -223,7 +223,7 @@ "id": "21", "metadata": {}, "source": [ - "To upload our dataset to the domain we need to wrap it in a `Syft Dataset` object. We can add some metadata to the object." + "To upload our dataset to the datasite we need to wrap it in a `Syft Dataset` object. We can add some metadata to the object." ] }, { @@ -296,13 +296,13 @@ }, "outputs": [], "source": [ - "auditor_client = node.register(\n", + "auditor_client = server.register(\n", " name=\"Peter Jones\",\n", " email=\"pjones@aisb.org\",\n", " password=\"password1234\",\n", " password_verify=\"password1234\",\n", ")\n", - "auditor_client = node.login(email=\"pjones@aisb.org\", password=\"password1234\")" + "auditor_client = server.login(email=\"pjones@aisb.org\", password=\"password1234\")" ] }, { diff --git a/notebooks/tutorials/model-training/00-data-owner-upload-data.ipynb b/notebooks/tutorials/model-training/00-data-owner-upload-data.ipynb index 10287b41fe5..adca3805b12 100644 --- a/notebooks/tutorials/model-training/00-data-owner-upload-data.ipynb +++ b/notebooks/tutorials/model-training/00-data-owner-upload-data.ipynb @@ -26,7 +26,7 @@ "id": "1", "metadata": {}, "source": [ - "## 1. Launch the domain, upload the data" + "## 1. 
Launch the datasite, upload the data" ] }, { @@ -36,8 +36,8 @@ "metadata": {}, "outputs": [], "source": [ - "node = sy.orchestra.launch(name=\"mnist-torch-domain\", dev_mode=True, reset=True)\n", - "root_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "server = sy.orchestra.launch(name=\"mnist-torch-datasite\", dev_mode=True, reset=True)\n", + "root_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { diff --git a/notebooks/tutorials/model-training/01-data-scientist-submit-code.ipynb b/notebooks/tutorials/model-training/01-data-scientist-submit-code.ipynb index 7b65d033cbc..13e52c83015 100644 --- a/notebooks/tutorials/model-training/01-data-scientist-submit-code.ipynb +++ b/notebooks/tutorials/model-training/01-data-scientist-submit-code.ipynb @@ -4,7 +4,9 @@ "cell_type": "code", "execution_count": null, "id": "0", - "metadata": {}, + "metadata": { + "metadata": {} + }, "outputs": [], "source": [ "# third party\n", @@ -20,18 +22,20 @@ "id": "1", "metadata": {}, "source": [ - "## 1. DS logins to the domain with the credentials created by the DO" + "## 1. DS logins to the datasite with the credentials created by the DO" ] }, { "cell_type": "code", "execution_count": null, "id": "2", - "metadata": {}, + "metadata": { + "metadata": {} + }, "outputs": [], "source": [ - "node = sy.orchestra.launch(name=\"mnist-torch-domain\", dev_mode=True)\n", - "ds_client = node.login(email=\"sheldon@caltech.edu\", password=\"changethis\")" + "server = sy.orchestra.launch(name=\"mnist-torch-datasite\", dev_mode=True)\n", + "ds_client = server.login(email=\"sheldon@caltech.edu\", password=\"changethis\")" ] }, { @@ -39,14 +43,16 @@ "id": "3", "metadata": {}, "source": [ - "### Inspect the datasets on the domain" + "### Inspect the datasets on the datasite" ] }, { "cell_type": "code", "execution_count": null, "id": "4", - "metadata": {}, + "metadata": { + "metadata": {} + }, "outputs": [], "source": [ "datasets = ds_client.datasets.get_all()\n", @@ -103,7 +109,8 @@ "metadata": {}, "outputs": [], "source": [ - "assert training_images.data is None" + "assert training_images.data is None\n", + "training_labels.data" ] }, { diff --git a/notebooks/tutorials/model-training/02-data-owner-review-approve-code.ipynb b/notebooks/tutorials/model-training/02-data-owner-review-approve-code.ipynb index d4a9c70cc5e..5606ec79111 100644 --- a/notebooks/tutorials/model-training/02-data-owner-review-approve-code.ipynb +++ b/notebooks/tutorials/model-training/02-data-owner-review-approve-code.ipynb @@ -21,8 +21,8 @@ "metadata": {}, "outputs": [], "source": [ - "node = sy.orchestra.launch(name=\"mnist-torch-domain\", dev_mode=True)\n", - "root_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "server = sy.orchestra.launch(name=\"mnist-torch-datasite\", dev_mode=True)\n", + "root_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { diff --git a/notebooks/tutorials/model-training/03-data-scientist-download-results.ipynb b/notebooks/tutorials/model-training/03-data-scientist-download-results.ipynb index de5c4270d2e..250a9f23dcc 100644 --- a/notebooks/tutorials/model-training/03-data-scientist-download-results.ipynb +++ b/notebooks/tutorials/model-training/03-data-scientist-download-results.ipynb @@ -22,8 +22,8 @@ "metadata": {}, "outputs": [], "source": [ - "node = sy.orchestra.launch(name=\"mnist-torch-domain\", dev_mode=True)\n", - "ds_client = node.login(email=\"sheldon@caltech.edu\", password=\"changethis\")" + 
"server = sy.orchestra.launch(name=\"mnist-torch-datasite\", dev_mode=True)\n", + "ds_client = server.login(email=\"sheldon@caltech.edu\", password=\"changethis\")" ] }, { diff --git a/notebooks/tutorials/pandas-cookbook/01-reading-from-a-csv.ipynb b/notebooks/tutorials/pandas-cookbook/01-reading-from-a-csv.ipynb index 368d7090d57..df6b188c0c4 100644 --- a/notebooks/tutorials/pandas-cookbook/01-reading-from-a-csv.ipynb +++ b/notebooks/tutorials/pandas-cookbook/01-reading-from-a-csv.ipynb @@ -54,7 +54,7 @@ }, "outputs": [], "source": [ - "node = sy.orchestra.launch(name=\"pandas-test-domain-1\", port=7081, reset=True)" + "server = sy.orchestra.launch(name=\"pandas-test-datasite-1\", port=7081, reset=True)" ] }, { @@ -74,7 +74,7 @@ }, "outputs": [], "source": [ - "root_domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "root_datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -256,7 +256,7 @@ " name=\"test\",\n", " asset_list=[sy.Asset(name=\"bikes\", data=fixed_df, mock=mock, mock_is_real=False)],\n", ")\n", - "root_domain_client.upload_dataset(dataset)" + "root_datasite_client.upload_dataset(dataset)" ] }, { @@ -276,7 +276,7 @@ }, "outputs": [], "source": [ - "user = root_domain_client.register(\n", + "user = root_datasite_client.register(\n", " name=\"Jane Doe\",\n", " email=\"jane@caltech.edu\",\n", " password=\"abc123\",\n", @@ -307,7 +307,7 @@ }, "outputs": [], "source": [ - "guest_domain_client = node.client" + "guest_datasite_client = server.client" ] }, { @@ -319,7 +319,7 @@ }, "outputs": [], "source": [ - "guest_client = guest_domain_client.login(email=\"jane@caltech.edu\", password=\"abc123\")" + "guest_client = guest_datasite_client.login(email=\"jane@caltech.edu\", password=\"abc123\")" ] }, { @@ -355,8 +355,8 @@ }, "outputs": [], "source": [ - "guest_domain_client = node.client\n", - "guest_client = guest_domain_client.login(email=\"jane@caltech.edu\", password=\"abc123\")" + "guest_datasite_client = server.client\n", + "guest_client = guest_datasite_client.login(email=\"jane@caltech.edu\", password=\"abc123\")" ] }, { @@ -368,7 +368,7 @@ }, "outputs": [], "source": [ - "ds = guest_domain_client.datasets[0]" + "ds = guest_datasite_client.datasets[0]" ] }, { @@ -632,7 +632,7 @@ }, "outputs": [], "source": [ - "domain_client = node.client.login(email=\"info@openmined.org\", password=\"changethis\")" + "datasite_client = server.client.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -644,7 +644,7 @@ }, "outputs": [], "source": [ - "notifications = domain_client.notifications.get_all_unread()" + "notifications = datasite_client.notifications.get_all_unread()" ] }, { @@ -750,7 +750,7 @@ }, "outputs": [], "source": [ - "real_data = domain_client.datasets[0].assets[0].data" + "real_data = datasite_client.datasets[0].assets[0].data" ] }, { diff --git a/notebooks/tutorials/pandas-cookbook/02-selecting-data-finding-common-complain.ipynb b/notebooks/tutorials/pandas-cookbook/02-selecting-data-finding-common-complain.ipynb index c7a8d83c7f6..bdb331d2f4e 100644 --- a/notebooks/tutorials/pandas-cookbook/02-selecting-data-finding-common-complain.ipynb +++ b/notebooks/tutorials/pandas-cookbook/02-selecting-data-finding-common-complain.ipynb @@ -54,7 +54,7 @@ }, "outputs": [], "source": [ - "node = sy.orchestra.launch(name=\"pandas-test-domain-2\", port=9082, reset=True)" + "server = sy.orchestra.launch(name=\"pandas-test-datasite-2\", port=9082, reset=True)" ] }, { @@ -74,7 +74,7 @@ }, 
"outputs": [], "source": [ - "domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -340,7 +340,7 @@ " sy.Asset(name=\"complaints\", data=complaints, mock=mock, mock_is_real=False)\n", " ],\n", ")\n", - "domain_client.upload_dataset(dataset)" + "datasite_client.upload_dataset(dataset)" ] }, { @@ -360,7 +360,7 @@ }, "outputs": [], "source": [ - "user = domain_client.register(\n", + "user = datasite_client.register(\n", " name=\"Jane Doe\",\n", " email=\"jane@caltech.edu\",\n", " password=\"abc123\",\n", @@ -371,9 +371,9 @@ "\n", "# todo: give user data scientist role\n", "\n", - "guest_domain_client = node.client\n", + "guest_datasite_client = server.client\n", "\n", - "guest_client = guest_domain_client.login(email=\"jane@caltech.edu\", password=\"abc123\")" + "guest_client = guest_datasite_client.login(email=\"jane@caltech.edu\", password=\"abc123\")" ] }, { @@ -409,7 +409,7 @@ }, "outputs": [], "source": [ - "guest_domain_client = node.client" + "guest_datasite_client = server.client" ] }, { @@ -421,8 +421,8 @@ }, "outputs": [], "source": [ - "# guest_domain_client = worker.guest_client\n", - "guest_client = guest_domain_client.login(email=\"jane@caltech.edu\", password=\"abc123\")" + "# guest_datasite_client = worker.guest_client\n", + "guest_client = guest_datasite_client.login(email=\"jane@caltech.edu\", password=\"abc123\")" ] }, { @@ -434,7 +434,7 @@ }, "outputs": [], "source": [ - "ds = guest_domain_client.datasets[0]" + "ds = guest_datasite_client.datasets[0]" ] }, { @@ -830,7 +830,7 @@ }, "outputs": [], "source": [ - "domain_client = node.client.login(email=\"info@openmined.org\", password=\"changethis\")" + "datasite_client = server.client.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -850,7 +850,7 @@ }, "outputs": [], "source": [ - "notifications = domain_client.notifications.get_all_unread()" + "notifications = datasite_client.notifications.get_all_unread()" ] }, { @@ -925,7 +925,7 @@ }, "outputs": [], "source": [ - "real_data = domain_client.datasets[0].assets[0].data" + "real_data = datasite_client.datasets[0].assets[0].data" ] }, { @@ -1020,7 +1020,7 @@ }, "outputs": [], "source": [ - "node.land()" + "server.land()" ] } ], diff --git a/notebooks/tutorials/pandas-cookbook/03-which-borough-has-the-most-noise-complaints.ipynb b/notebooks/tutorials/pandas-cookbook/03-which-borough-has-the-most-noise-complaints.ipynb index 407f6507c3a..7dc6a47b948 100644 --- a/notebooks/tutorials/pandas-cookbook/03-which-borough-has-the-most-noise-complaints.ipynb +++ b/notebooks/tutorials/pandas-cookbook/03-which-borough-has-the-most-noise-complaints.ipynb @@ -57,7 +57,7 @@ }, "outputs": [], "source": [ - "node = sy.orchestra.launch(name=\"pandas-test-domain-3\", port=7083, reset=True)" + "server = sy.orchestra.launch(name=\"pandas-test-datasite-3\", port=7083, reset=True)" ] }, { @@ -77,7 +77,7 @@ }, "outputs": [], "source": [ - "domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -355,7 +355,7 @@ " sy.Asset(name=\"complaints\", data=complaints, mock=mock, mock_is_real=False)\n", " ],\n", ")\n", - "domain_client.upload_dataset(dataset)" + "datasite_client.upload_dataset(dataset)" ] }, { @@ -375,7 +375,7 @@ }, "outputs": [], "source": [ - "user = domain_client.register(\n", + "user = datasite_client.register(\n", 
" name=\"Jane Doe\",\n", " email=\"jane@caltech.edu\",\n", " password=\"abc123\",\n", @@ -386,9 +386,9 @@ "\n", "# todo: give user data scientist role\n", "\n", - "guest_domain_client = node.client\n", + "guest_datasite_client = server.client\n", "\n", - "guest_client = guest_domain_client.login(email=\"jane@caltech.edu\", password=\"abc123\")" + "guest_client = guest_datasite_client.login(email=\"jane@caltech.edu\", password=\"abc123\")" ] }, { @@ -424,8 +424,8 @@ }, "outputs": [], "source": [ - "guest_domain_client = node.client\n", - "guest_client = guest_domain_client.login(email=\"jane@caltech.edu\", password=\"abc123\")" + "guest_datasite_client = server.client\n", + "guest_client = guest_datasite_client.login(email=\"jane@caltech.edu\", password=\"abc123\")" ] }, { @@ -437,7 +437,7 @@ }, "outputs": [], "source": [ - "ds = guest_domain_client.datasets[0]" + "ds = guest_datasite_client.datasets[0]" ] }, { @@ -955,7 +955,7 @@ }, "outputs": [], "source": [ - "domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -967,7 +967,7 @@ }, "outputs": [], "source": [ - "notifications = domain_client.notifications.get_all_unread()" + "notifications = datasite_client.notifications.get_all_unread()" ] }, { @@ -1042,7 +1042,7 @@ }, "outputs": [], "source": [ - "real_data = domain_client.datasets[0].assets[0].data" + "real_data = datasite_client.datasets[0].assets[0].data" ] }, { @@ -1137,7 +1137,7 @@ }, "outputs": [], "source": [ - "node.land()" + "server.land()" ] } ], diff --git a/notebooks/tutorials/pandas-cookbook/04-weekday-bike-most-groupby-aggregate.ipynb b/notebooks/tutorials/pandas-cookbook/04-weekday-bike-most-groupby-aggregate.ipynb index daed660972d..1d065baae95 100644 --- a/notebooks/tutorials/pandas-cookbook/04-weekday-bike-most-groupby-aggregate.ipynb +++ b/notebooks/tutorials/pandas-cookbook/04-weekday-bike-most-groupby-aggregate.ipynb @@ -46,7 +46,7 @@ }, "outputs": [], "source": [ - "node = sy.orchestra.launch(name=\"pandas-test-domain-4\", port=9084, reset=True)" + "server = sy.orchestra.launch(name=\"pandas-test-datasite-4\", port=9084, reset=True)" ] }, { @@ -66,7 +66,7 @@ }, "outputs": [], "source": [ - "root_domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "root_datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -211,7 +211,7 @@ " name=\"bikes2\",\n", " asset_list=[sy.Asset(name=\"bikes\", data=df, mock=mock, mock_is_real=False)],\n", ")\n", - "root_domain_client.upload_dataset(dataset)" + "root_datasite_client.upload_dataset(dataset)" ] }, { @@ -231,7 +231,7 @@ }, "outputs": [], "source": [ - "user = root_domain_client.register(\n", + "user = root_datasite_client.register(\n", " name=\"Jane Doe\",\n", " email=\"jane@caltech.edu\",\n", " password=\"abc123\",\n", @@ -242,9 +242,9 @@ "\n", "# todo: give user data scientist role\n", "\n", - "guest_domain_client = node.client\n", + "guest_datasite_client = server.client\n", "\n", - "guest_client = guest_domain_client.login(email=\"jane@caltech.edu\", password=\"abc123\")" + "guest_client = guest_datasite_client.login(email=\"jane@caltech.edu\", password=\"abc123\")" ] }, { @@ -288,7 +288,7 @@ }, "outputs": [], "source": [ - "ds = guest_domain_client.datasets[0]" + "ds = guest_datasite_client.datasets[0]" ] }, { @@ -716,7 +716,7 @@ }, "outputs": [], "source": [ - "root_domain_client = node.login(email=\"info@openmined.org\", 
password=\"changethis\")" + "root_datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -736,7 +736,7 @@ }, "outputs": [], "source": [ - "notifications = root_domain_client.notifications.get_all_unread()" + "notifications = root_datasite_client.notifications.get_all_unread()" ] }, { @@ -811,7 +811,7 @@ }, "outputs": [], "source": [ - "real_data = root_domain_client.datasets[0].assets[0].data" + "real_data = root_datasite_client.datasets[0].assets[0].data" ] }, { @@ -906,7 +906,7 @@ }, "outputs": [], "source": [ - "node.land()" + "server.land()" ] } ], diff --git a/notebooks/tutorials/pandas-cookbook/05-combining-dataframes-scraping-weather-data.ipynb b/notebooks/tutorials/pandas-cookbook/05-combining-dataframes-scraping-weather-data.ipynb index 0bff86b5c06..8bece628ede 100644 --- a/notebooks/tutorials/pandas-cookbook/05-combining-dataframes-scraping-weather-data.ipynb +++ b/notebooks/tutorials/pandas-cookbook/05-combining-dataframes-scraping-weather-data.ipynb @@ -54,7 +54,7 @@ }, "outputs": [], "source": [ - "node = sy.orchestra.launch(name=\"pandas-test-domain-5\", port=9085, reset=True)" + "server = sy.orchestra.launch(name=\"pandas-test-datasite-5\", port=9085, reset=True)" ] }, { @@ -74,7 +74,7 @@ }, "outputs": [], "source": [ - "root_domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "root_datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -298,7 +298,7 @@ "outputs": [], "source": [ "dataset = sy.Dataset(name=\"test\", asset_list=assets)\n", - "root_domain_client.upload_dataset(dataset)" + "root_datasite_client.upload_dataset(dataset)" ] }, { @@ -330,7 +330,7 @@ }, "outputs": [], "source": [ - "user = root_domain_client.register(\n", + "user = root_datasite_client.register(\n", " name=\"Jane Doe\",\n", " email=\"jane@caltech.edu\",\n", " password=\"abc123\",\n", @@ -341,9 +341,9 @@ "\n", "# todo: give user data scientist role\n", "\n", - "guest_domain_client = node.client\n", + "guest_datasite_client = server.client\n", "\n", - "guest_client = guest_domain_client.login(email=\"jane@caltech.edu\", password=\"abc123\")" + "guest_client = guest_datasite_client.login(email=\"jane@caltech.edu\", password=\"abc123\")" ] }, { @@ -382,7 +382,7 @@ }, "outputs": [], "source": [ - "ds = guest_domain_client.datasets[-1]" + "ds = guest_datasite_client.datasets[-1]" ] }, { @@ -903,7 +903,7 @@ }, "outputs": [], "source": [ - "root_domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "root_datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -923,7 +923,7 @@ }, "outputs": [], "source": [ - "notifications = root_domain_client.notifications.get_all_unread()" + "notifications = root_datasite_client.notifications.get_all_unread()" ] }, { @@ -999,8 +999,8 @@ "outputs": [], "source": [ "real_data1, real_data2 = (\n", - " root_domain_client.datasets[-1].assets[\"weather1\"].data,\n", - " root_domain_client.datasets[-1].assets[\"weather2\"].data,\n", + " root_datasite_client.datasets[-1].assets[\"weather1\"].data,\n", + " root_datasite_client.datasets[-1].assets[\"weather2\"].data,\n", ")" ] }, @@ -1070,7 +1070,7 @@ }, "outputs": [], "source": [ - "guest_client = guest_domain_client.login(email=\"jane@caltech.edu\", password=\"abc123\")" + "guest_client = guest_datasite_client.login(email=\"jane@caltech.edu\", password=\"abc123\")" ] }, { @@ -1175,7 +1175,7 @@ }, "outputs": [], "source": [ - "node.land()" + 
"server.land()" ] } ], diff --git a/notebooks/tutorials/pandas-cookbook/06-string-operations-which-month-was-the-snowiest.ipynb b/notebooks/tutorials/pandas-cookbook/06-string-operations-which-month-was-the-snowiest.ipynb index 896f842e6cb..2fc567f9be9 100644 --- a/notebooks/tutorials/pandas-cookbook/06-string-operations-which-month-was-the-snowiest.ipynb +++ b/notebooks/tutorials/pandas-cookbook/06-string-operations-which-month-was-the-snowiest.ipynb @@ -54,7 +54,7 @@ }, "outputs": [], "source": [ - "node = sy.orchestra.launch(name=\"pandas-test-domain-6\", port=9086, reset=True)" + "server = sy.orchestra.launch(name=\"pandas-test-datasite-6\", port=9086, reset=True)" ] }, { @@ -74,7 +74,7 @@ }, "outputs": [], "source": [ - "root_domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "root_datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -287,7 +287,7 @@ " sy.Asset(name=\"weather\", data=weather_2012_final, mock=mock, mock_is_real=False)\n", " ],\n", ")\n", - "root_domain_client.upload_dataset(dataset)" + "root_datasite_client.upload_dataset(dataset)" ] }, { @@ -319,7 +319,7 @@ }, "outputs": [], "source": [ - "user = root_domain_client.register(\n", + "user = root_datasite_client.register(\n", " name=\"Jane Doe\",\n", " email=\"jane@caltech.edu\",\n", " password=\"abc123\",\n", @@ -328,8 +328,8 @@ " website=\"https://www.caltech.edu/\",\n", ")\n", "# todo: give user data scientist role\n", - "guest_domain_client = node.client\n", - "guest_client = guest_domain_client.login(email=\"jane@caltech.edu\", password=\"abc123\")" + "guest_datasite_client = server.client\n", + "guest_client = guest_datasite_client.login(email=\"jane@caltech.edu\", password=\"abc123\")" ] }, { @@ -368,7 +368,7 @@ }, "outputs": [], "source": [ - "ds = guest_domain_client.datasets[0]" + "ds = guest_datasite_client.datasets[0]" ] }, { @@ -805,7 +805,7 @@ }, "outputs": [], "source": [ - "domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -826,7 +826,7 @@ }, "outputs": [], "source": [ - "notifications = domain_client.notifications.get_all_unread()" + "notifications = datasite_client.notifications.get_all_unread()" ] }, { @@ -901,7 +901,7 @@ }, "outputs": [], "source": [ - "real_data = domain_client.datasets[0].assets[0].data" + "real_data = datasite_client.datasets[0].assets[0].data" ] }, { @@ -958,7 +958,7 @@ }, "outputs": [], "source": [ - "guest_client = node.login(email=\"jane@caltech.edu\", password=\"abc123\")" + "guest_client = server.login(email=\"jane@caltech.edu\", password=\"abc123\")" ] }, { @@ -1072,7 +1072,7 @@ }, "outputs": [], "source": [ - "node.land()" + "server.land()" ] } ], diff --git a/notebooks/tutorials/pandas-cookbook/07-cleaning-up-messy-data.ipynb b/notebooks/tutorials/pandas-cookbook/07-cleaning-up-messy-data.ipynb index dab79f0c217..bbbfe46751e 100644 --- a/notebooks/tutorials/pandas-cookbook/07-cleaning-up-messy-data.ipynb +++ b/notebooks/tutorials/pandas-cookbook/07-cleaning-up-messy-data.ipynb @@ -54,7 +54,7 @@ }, "outputs": [], "source": [ - "node = sy.orchestra.launch(name=\"pandas-test-domain-7\", port=9087, reset=True)" + "server = sy.orchestra.launch(name=\"pandas-test-datasite-7\", port=9087, reset=True)" ] }, { @@ -74,7 +74,7 @@ }, "outputs": [], "source": [ - "root_domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "root_datasite_client = 
server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -307,7 +307,7 @@ " )\n", " ],\n", ")\n", - "root_domain_client.upload_dataset(dataset)" + "root_datasite_client.upload_dataset(dataset)" ] }, { @@ -327,7 +327,7 @@ }, "outputs": [], "source": [ - "user = root_domain_client.register(\n", + "user = root_datasite_client.register(\n", " name=\"Jane Doe\",\n", " email=\"jane@caltech.edu\",\n", " password=\"abc123\",\n", @@ -336,8 +336,8 @@ " website=\"https://www.caltech.edu/\",\n", ")\n", "# todo: give user data scientist role\n", - "guest_domain_client = node.client\n", - "guest_client = guest_domain_client.login(email=\"jane@caltech.edu\", password=\"abc123\")" + "guest_datasite_client = server.client\n", + "guest_client = guest_datasite_client.login(email=\"jane@caltech.edu\", password=\"abc123\")" ] }, { @@ -860,7 +860,7 @@ }, "outputs": [], "source": [ - "domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -881,7 +881,7 @@ }, "outputs": [], "source": [ - "notifications = domain_client.notifications.get_all_unread()" + "notifications = datasite_client.notifications.get_all_unread()" ] }, { @@ -956,7 +956,7 @@ }, "outputs": [], "source": [ - "real_data = domain_client.datasets[0].assets[0].data" + "real_data = datasite_client.datasets[0].assets[0].data" ] }, { @@ -1086,7 +1086,7 @@ }, "outputs": [], "source": [ - "node.land()" + "server.land()" ] } ], diff --git a/notebooks/tutorials/pandas-cookbook/08-how-to-deal-with-timestamps.ipynb b/notebooks/tutorials/pandas-cookbook/08-how-to-deal-with-timestamps.ipynb index 32beba9af48..153ec4d16e0 100644 --- a/notebooks/tutorials/pandas-cookbook/08-how-to-deal-with-timestamps.ipynb +++ b/notebooks/tutorials/pandas-cookbook/08-how-to-deal-with-timestamps.ipynb @@ -56,7 +56,7 @@ }, "outputs": [], "source": [ - "node = sy.orchestra.launch(name=\"pandas-test-domain-8\", port=9088, reset=True)" + "server = sy.orchestra.launch(name=\"pandas-test-datasite-8\", port=9088, reset=True)" ] }, { @@ -77,7 +77,7 @@ }, "outputs": [], "source": [ - "root_domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "root_datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -328,7 +328,7 @@ " )\n", " ],\n", ")\n", - "root_domain_client.upload_dataset(dataset)" + "root_datasite_client.upload_dataset(dataset)" ] }, { @@ -361,7 +361,7 @@ }, "outputs": [], "source": [ - "user = root_domain_client.register(\n", + "user = root_datasite_client.register(\n", " name=\"Jane Doe\",\n", " email=\"jane@caltech.edu\",\n", " password=\"abc123\",\n", @@ -370,8 +370,8 @@ " website=\"https://www.caltech.edu/\",\n", ")\n", "# todo: give user data scientist role\n", - "guest_domain_client = node.client\n", - "guest_client = guest_domain_client.login(email=\"jane@caltech.edu\", password=\"abc123\")" + "guest_datasite_client = server.client\n", + "guest_client = guest_datasite_client.login(email=\"jane@caltech.edu\", password=\"abc123\")" ] }, { @@ -425,7 +425,7 @@ }, "outputs": [], "source": [ - "ds = guest_domain_client.datasets[0]" + "ds = guest_datasite_client.datasets[0]" ] }, { @@ -811,7 +811,7 @@ }, "outputs": [], "source": [ - "domain_client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "datasite_client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -832,7 +832,7 @@ }, "outputs": [], "source": [ - 
"notifications = domain_client.notifications.get_all_unread()" + "notifications = datasite_client.notifications.get_all_unread()" ] }, { @@ -907,7 +907,7 @@ }, "outputs": [], "source": [ - "real_data = domain_client.datasets[0].assets[0].data" + "real_data = datasite_client.datasets[0].assets[0].data" ] }, { @@ -1014,7 +1014,7 @@ }, "outputs": [], "source": [ - "node.land()" + "server.land()" ] } ], diff --git a/notebooks/tutorials/version-upgrades/0-prepare-migration-data.ipynb b/notebooks/tutorials/version-upgrades/0-prepare-migration-data.ipynb index 37be2b11305..ed8a496c407 100644 --- a/notebooks/tutorials/version-upgrades/0-prepare-migration-data.ipynb +++ b/notebooks/tutorials/version-upgrades/0-prepare-migration-data.ipynb @@ -55,7 +55,7 @@ "id": "4", "metadata": {}, "source": [ - "# Launch Node" + "# Launch Server" ] }, { @@ -65,7 +65,7 @@ "metadata": {}, "outputs": [], "source": [ - "node = sy.orchestra.launch(\n", + "server = sy.orchestra.launch(\n", " name=\"test_upgradability\",\n", " dev_mode=True,\n", " reset=True,\n", @@ -82,7 +82,7 @@ "metadata": {}, "outputs": [], "source": [ - "client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -112,7 +112,7 @@ "metadata": {}, "outputs": [], "source": [ - "client_ds = node.login(email=\"ds@openmined.org\", password=\"pw\")" + "client_ds = server.login(email=\"ds@openmined.org\", password=\"pw\")" ] }, { @@ -149,7 +149,7 @@ "\n", "\n", "@sy.syft_function_single_use(data=data_low)\n", - "def compute_mean(domain, data) -> float:\n", + "def compute_mean(datasite, data) -> float:\n", " # launch another job\n", " print(\"Computing mean...\")\n", " return data" @@ -222,8 +222,8 @@ "metadata": {}, "outputs": [], "source": [ - "if node.node_type.value == \"python\":\n", - " node.land()" + "if server.server_type.value == \"python\":\n", + " server.land()" ] }, { diff --git a/notebooks/tutorials/version-upgrades/1-dump-database-to-file.ipynb b/notebooks/tutorials/version-upgrades/1-dump-database-to-file.ipynb index e5ecd841e8f..ad247d91a38 100644 --- a/notebooks/tutorials/version-upgrades/1-dump-database-to-file.ipynb +++ b/notebooks/tutorials/version-upgrades/1-dump-database-to-file.ipynb @@ -21,7 +21,7 @@ "metadata": {}, "outputs": [], "source": [ - "node = sy.orchestra.launch(\n", + "server = sy.orchestra.launch(\n", " name=\"test_upgradability\",\n", " dev_mode=True,\n", " local_db=True,\n", @@ -30,7 +30,7 @@ " migrate=False,\n", ")\n", "\n", - "client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -50,7 +50,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Check if this node has data on it\n", + "# Check if this server has data on it\n", "assert len(client.users.get_all()) == 2" ] }, @@ -112,8 +112,8 @@ "metadata": {}, "outputs": [], "source": [ - "if node.node_type.value == \"python\":\n", - " node.land()" + "if server.server_type.value == \"python\":\n", + " server.land()" ] }, { diff --git a/notebooks/tutorials/version-upgrades/2-migrate-from-file.ipynb b/notebooks/tutorials/version-upgrades/2-migrate-from-file.ipynb index 24329301c61..993894948d4 100644 --- a/notebooks/tutorials/version-upgrades/2-migrate-from-file.ipynb +++ b/notebooks/tutorials/version-upgrades/2-migrate-from-file.ipynb @@ -31,14 +31,14 @@ "metadata": {}, "outputs": [], "source": [ - "node = sy.orchestra.launch(\n", + "server = 
sy.orchestra.launch(\n", " name=\"test_upgradability\",\n", " dev_mode=True,\n", " reset=True,\n", " port=\"auto\",\n", ")\n", "\n", - "client = node.login(email=\"info@openmined.org\", password=\"changethis\")" + "client = server.login(email=\"info@openmined.org\", password=\"changethis\")" ] }, { @@ -48,7 +48,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Check if this is a new node\n", + "# Check if this is a new server\n", "migration_data = client.get_migration_data()\n", "\n", "assert len(migration_data.store_objects[User]) == 1\n", @@ -102,7 +102,7 @@ "metadata": {}, "outputs": [], "source": [ - "client_ds = node.login(email=\"ds@openmined.org\", password=\"pw\")" + "client_ds = server.login(email=\"ds@openmined.org\", password=\"pw\")" ] }, { @@ -258,8 +258,8 @@ "metadata": {}, "outputs": [], "source": [ - "if node.node_type.value == \"python\":\n", - " node.land()" + "if server.server_type.value == \"python\":\n", + " server.land()" ] }, { diff --git a/packages/grid/VERSION b/packages/grid/VERSION index df1dec5602a..ab18b3a5a42 100644 --- a/packages/grid/VERSION +++ b/packages/grid/VERSION @@ -1,5 +1,5 @@ # Mono Repo Global Version -__version__ = "0.8.7-beta.13" +__version__ = "0.8.8-beta.2" # elsewhere we can call this file: `python VERSION` and simply take the stdout # stdlib diff --git a/packages/grid/backend/backend.dockerfile b/packages/grid/backend/backend.dockerfile index 606569c49f4..256fdfd447b 100644 --- a/packages/grid/backend/backend.dockerfile +++ b/packages/grid/backend/backend.dockerfile @@ -60,7 +60,7 @@ WORKDIR /root/app/ # Copy pre-built syft dependencies COPY --from=syft_deps /root/app/.venv .venv -# copy grid +# copy server COPY grid/backend/grid ./grid/ # copy syft @@ -73,8 +73,8 @@ ENV \ VIRTUAL_ENV="/root/app/.venv" \ # Syft APPDIR="/root/app" \ - NODE_NAME="default_node_name" \ - NODE_TYPE="domain" \ + SERVER_NAME="default_server_name" \ + SERVER_TYPE="datasite" \ SERVICE_NAME="backend" \ RELEASE="production" \ DEV_MODE="False" \ diff --git a/packages/grid/backend/grid/api/new/new.py b/packages/grid/backend/grid/api/new/new.py index 2f0a6910bdf..3ff61bb041b 100644 --- a/packages/grid/backend/grid/api/new/new.py +++ b/packages/grid/backend/grid/api/new/new.py @@ -1,7 +1,7 @@ # syft absolute -from syft.node.routes import make_routes +from syft.server.routes import make_routes -# grid absolute -from grid.core.node import worker +# server absolute +from grid.core.server import worker router = make_routes(worker=worker) diff --git a/packages/grid/backend/grid/api/router.py b/packages/grid/backend/grid/api/router.py index 020491323a6..8412869db53 100644 --- a/packages/grid/backend/grid/api/router.py +++ b/packages/grid/backend/grid/api/router.py @@ -7,7 +7,7 @@ # third party from fastapi import APIRouter -# grid absolute +# server absolute from grid.api.new.new import router as new_router api_router = APIRouter() diff --git a/packages/grid/backend/grid/bootstrap.py b/packages/grid/backend/grid/bootstrap.py index 0e1878e2ee1..914411ec864 100644 --- a/packages/grid/backend/grid/bootstrap.py +++ b/packages/grid/backend/grid/bootstrap.py @@ -9,7 +9,7 @@ from nacl.encoding import HexEncoder from nacl.signing import SigningKey -# we want to bootstrap nodes with persistent uids and keys and allow a variety of ways +# we want to bootstrap servers with persistent uids and keys and allow a variety of ways # to resolve these at startup # first we check the environment variables @@ -27,8 +27,8 @@ def get_env(key: str, default: str = "") -> str | None: CREDENTIALS_PATH = 
str(get_env("CREDENTIALS_PATH", "credentials.json")) -NODE_PRIVATE_KEY = "NODE_PRIVATE_KEY" -NODE_UID = "NODE_UID" +SERVER_PRIVATE_KEY = "SERVER_PRIVATE_KEY" +SERVER_UID = "SERVER_UID" def get_credentials_file() -> dict[str, str]: @@ -64,7 +64,7 @@ def save_credential(key: str, value: str) -> str: return value -def generate_node_uid() -> str: +def generate_server_uid() -> str: return str(uuid.uuid4()) @@ -77,11 +77,11 @@ def generate_private_key() -> str: def get_private_key_env() -> str | None: - return get_env(NODE_PRIVATE_KEY) + return get_env(SERVER_PRIVATE_KEY) -def get_node_uid_env() -> str | None: - return get_env(NODE_UID) +def get_server_uid_env() -> str | None: + return get_env(SERVER_UID) def validate_private_key(private_key: str | bytes) -> str: @@ -95,17 +95,17 @@ def validate_private_key(private_key: str | bytes) -> str: return str_key except Exception: pass - raise Exception(f"{NODE_PRIVATE_KEY} is invalid") + raise Exception(f"{SERVER_PRIVATE_KEY} is invalid") -def validate_uid(node_uid: str) -> str: +def validate_uid(server_uid: str) -> str: try: - uid = uuid.UUID(node_uid) - if node_uid == uid.hex or node_uid == str(uid): + uid = uuid.UUID(server_uid) + if server_uid == uid.hex or server_uid == str(uid): return str(uid) except Exception: pass - raise Exception(f"{NODE_UID} is invalid") + raise Exception(f"{SERVER_UID} is invalid") def get_credential( @@ -140,11 +140,13 @@ def get_credential( def get_private_key() -> str: - return get_credential(NODE_PRIVATE_KEY, validate_private_key, generate_private_key) + return get_credential( + SERVER_PRIVATE_KEY, validate_private_key, generate_private_key + ) -def get_node_uid() -> str: - return get_credential(NODE_UID, validate_uid, generate_node_uid) +def get_server_uid() -> str: + return get_credential(SERVER_UID, validate_uid, generate_server_uid) def delete_credential_file() -> None: @@ -168,15 +170,15 @@ def delete_credential_file() -> None: if args.private_key: print(get_private_key()) elif args.uid: - print(get_node_uid()) + print(get_server_uid()) elif args.file: delete_credential_file() get_private_key() - get_node_uid() + get_server_uid() print(f"Generated credentials file at '{CREDENTIALS_PATH}'") elif args.debug: print("Credentials File", get_credentials_file()) - print(NODE_PRIVATE_KEY, "=", get_private_key_env()) - print(NODE_UID, "=", get_node_uid_env()) + print(SERVER_PRIVATE_KEY, "=", get_private_key_env()) + print(SERVER_UID, "=", get_server_uid_env()) else: parser.print_help() diff --git a/packages/grid/backend/grid/core/celery_config.py b/packages/grid/backend/grid/core/celery_config.py deleted file mode 100644 index 20fa3f55544..00000000000 --- a/packages/grid/backend/grid/core/celery_config.py +++ /dev/null @@ -1,13 +0,0 @@ -worker_send_task_event = False -task_ignore_result = True -task_time_limit = 1500 # Rasswanth: should modify after optimizing PC -task_acks_late = True -broker_pool_limit = 500 -worker_prefetch_multiplier = 1 -task_routes = { - "grid.worker.msg_without_reply": "main-queue", - "delivery_mode": "transient", -} -accept_content = ["application/syft"] -task_serializer = "syft" -result_serializer = "syft" diff --git a/packages/grid/backend/grid/core/celery_serde.py b/packages/grid/backend/grid/core/celery_serde.py deleted file mode 100644 index 005e4a4f514..00000000000 --- a/packages/grid/backend/grid/core/celery_serde.py +++ /dev/null @@ -1,46 +0,0 @@ -# stdlib -from typing import Any - -# third party -from kombu import serialization - -# syft absolute -import syft as sy -from syft.logger import 
error - - -def loads(data: bytes) -> Any: - # original payload might have nested bytes in the args - org_payload = sy.deserialize(data, from_bytes=True) - # original payload is found at org_payload[0][0] - if ( - len(org_payload) > 0 - and len(org_payload[0]) > 0 - and isinstance(org_payload[0][0], bytes) - ): - try: - nested_data = org_payload[0][0] - org_obj = sy.deserialize(nested_data, from_bytes=True) - org_payload[0][0] = org_obj - except Exception as e: - error(f"Unable to deserialize nested payload. {e}") - raise e - - return org_payload - - -def dumps(obj: Any) -> bytes: - # this is usually a Tuple of args where the first one is what we send to the task - # but it can also get other arbitrary data which we need to serde - # since we might get bytes directly from the web endpoint we can avoid double - # unserializing it by keeping it inside the nested args list org_payload[0][0] - return sy.serialize(obj, to_bytes=True) - - -serialization.register( - "syft", - dumps, - loads, - content_type="application/syft", - content_encoding="binary", -) diff --git a/packages/grid/backend/grid/core/config.py b/packages/grid/backend/grid/core/config.py index 619637c06c6..0d7d090743a 100644 --- a/packages/grid/backend/grid/core/config.py +++ b/packages/grid/backend/grid/core/config.py @@ -97,13 +97,13 @@ def get_emails_enabled(self) -> Self: DEFAULT_ROOT_PASSWORD: str = "changethis" USERS_OPEN_REGISTRATION: bool = False - NODE_NAME: str = "default_node_name" + SERVER_NAME: str = "default_server_name" STREAM_QUEUE: bool = False - NODE_TYPE: str = "domain" + SERVER_TYPE: str = "datasite" OPEN_REGISTRATION: bool = True - # DOMAIN_ASSOCIATION_REQUESTS_AUTOMATICALLY_ACCEPTED: bool = True + # DATASITE_ASSOCIATION_REQUESTS_AUTOMATICALLY_ACCEPTED: bool = True USE_BLOB_STORAGE: bool = ( True if os.getenv("USE_BLOB_STORAGE", "false").lower() == "true" else False ) @@ -124,7 +124,7 @@ def get_emails_enabled(self) -> Self: # STORE_DB_ID: int = int(os.getenv("STORE_DB_ID", 0)) # LEDGER_DB_ID: int = int(os.getenv("LEDGER_DB_ID", 1)) # NETWORK_CHECK_INTERVAL: int = int(os.getenv("NETWORK_CHECK_INTERVAL", 60)) - # DOMAIN_CHECK_INTERVAL: int = int(os.getenv("DOMAIN_CHECK_INTERVAL", 60)) + # DATASITE_CHECK_INTERVAL: int = int(os.getenv("DATASITE_CHECK_INTERVAL", 60)) CONTAINER_HOST: str = str(os.getenv("CONTAINER_HOST", "docker")) MONGO_HOST: str = str(os.getenv("MONGO_HOST", "")) MONGO_PORT: int = int(os.getenv("MONGO_PORT", 27017)) diff --git a/packages/grid/backend/grid/core/node.py b/packages/grid/backend/grid/core/server.py similarity index 75% rename from packages/grid/backend/grid/core/node.py rename to packages/grid/backend/grid/core/server.py index 926cbbc5556..9802eece8e8 100644 --- a/packages/grid/backend/grid/core/node.py +++ b/packages/grid/backend/grid/core/server.py @@ -1,15 +1,15 @@ # syft absolute -from syft.abstract_node import NodeType -from syft.node.domain import Domain -from syft.node.domain import Node -from syft.node.enclave import Enclave -from syft.node.gateway import Gateway -from syft.node.node import get_default_bucket_name -from syft.node.node import get_enable_warnings -from syft.node.node import get_node_name -from syft.node.node import get_node_side_type -from syft.node.node import get_node_type -from syft.node.node import get_node_uid_env +from syft.abstract_server import ServerType +from syft.server.datasite import Datasite +from syft.server.datasite import Server +from syft.server.enclave import Enclave +from syft.server.gateway import Gateway +from syft.server.server import 
get_default_bucket_name +from syft.server.server import get_enable_warnings +from syft.server.server import get_server_name +from syft.server.server import get_server_side_type +from syft.server.server import get_server_type +from syft.server.server import get_server_uid_env from syft.service.queue.zmq_queue import ZMQClientConfig from syft.service.queue.zmq_queue import ZMQQueueConfig from syft.store.blob_storage.seaweedfs import SeaweedFSClientConfig @@ -20,7 +20,7 @@ from syft.store.sqlite_document_store import SQLiteStoreConfig from syft.types.uid import UID -# grid absolute +# server absolute from grid.core.config import settings @@ -49,7 +49,7 @@ def mongo_store_config() -> MongoStoreConfig: def sql_store_config() -> SQLiteStoreConfig: client_config = SQLiteStoreClientConfig( - filename=f"{UID.from_string(get_node_uid_env())}.sqlite", + filename=f"{UID.from_string(get_server_uid_env())}.sqlite", path=settings.SQLITE_PATH, ) return SQLiteStoreConfig(client_config=client_config) @@ -72,28 +72,28 @@ def seaweedfs_config() -> SeaweedFSConfig: ) -node_type = NodeType(get_node_type()) -node_name = get_node_name() +server_type = ServerType(get_server_type()) +server_name = get_server_name() -node_side_type = get_node_side_type() +server_side_type = get_server_side_type() enable_warnings = get_enable_warnings() worker_classes = { - NodeType.DOMAIN: Domain, - NodeType.GATEWAY: Gateway, - NodeType.ENCLAVE: Enclave, + ServerType.DATASITE: Datasite, + ServerType.GATEWAY: Gateway, + ServerType.ENCLAVE: Enclave, } -worker_class = worker_classes[node_type] +worker_class = worker_classes[server_type] single_container_mode = settings.SINGLE_CONTAINER_MODE store_config = sql_store_config() if single_container_mode else mongo_store_config() blob_storage_config = None if single_container_mode else seaweedfs_config() queue_config = queue_config() -worker: Node = worker_class( - name=node_name, - node_side_type=node_side_type, +worker: Server = worker_class( + name=server_name, + server_side_type=server_side_type, action_store_config=store_config, document_store_config=store_config, enable_warnings=enable_warnings, diff --git a/packages/grid/backend/grid/images/worker_cpu.dockerfile b/packages/grid/backend/grid/images/worker_cpu.dockerfile index 94297fee0a9..e8021670dde 100644 --- a/packages/grid/backend/grid/images/worker_cpu.dockerfile +++ b/packages/grid/backend/grid/images/worker_cpu.dockerfile @@ -2,11 +2,11 @@ # Build as-is to create a base worker image # Build with args to create a custom worker image -# NOTE: This dockerfile will be built inside a grid-backend container in PROD +# NOTE: This dockerfile will be built inside a syft-backend container in PROD # Hence COPY will not work the same way in DEV vs. 
PROD -ARG SYFT_VERSION_TAG="0.8.7-beta.13" -FROM openmined/grid-backend:${SYFT_VERSION_TAG} +ARG SYFT_VERSION_TAG="0.8.8-beta.2" +FROM openmined/syft-backend:${SYFT_VERSION_TAG} # should match base image python version ARG PYTHON_VERSION="3.12" diff --git a/packages/grid/backend/grid/main.py b/packages/grid/backend/grid/main.py index 459448c5f01..497a2dd7a90 100644 --- a/packages/grid/backend/grid/main.py +++ b/packages/grid/backend/grid/main.py @@ -11,10 +11,10 @@ # syft absolute from syft.protocol.data_protocol import stage_protocol_changes -# grid absolute +# server absolute from grid.api.router import api_router from grid.core.config import settings -from grid.core.node import worker +from grid.core.server import worker class EndpointFilter(logging.Filter): diff --git a/packages/grid/backend/grid/start.sh b/packages/grid/backend/grid/start.sh index 4b3d5de4cf2..02194eb840b 100755 --- a/packages/grid/backend/grid/start.sh +++ b/packages/grid/backend/grid/start.sh @@ -7,7 +7,7 @@ APP_MODULE=grid.main:app LOG_LEVEL=${LOG_LEVEL:-info} HOST=${HOST:-0.0.0.0} PORT=${PORT:-80} -NODE_TYPE=${NODE_TYPE:-domain} +SERVER_TYPE=${SERVER_TYPE:-datasite} APPDIR=${APPDIR:-$HOME/app} RELOAD="" DEBUG_CMD="" @@ -26,11 +26,11 @@ then fi export CREDENTIALS_PATH=${CREDENTIALS_PATH:-$HOME/data/creds/credentials.json} -export NODE_PRIVATE_KEY=$(python $APPDIR/grid/bootstrap.py --private_key) -export NODE_UID=$(python $APPDIR/grid/bootstrap.py --uid) -export NODE_TYPE=$NODE_TYPE +export SERVER_PRIVATE_KEY=$(python $APPDIR/grid/bootstrap.py --private_key) +export SERVER_UID=$(python $APPDIR/grid/bootstrap.py --uid) +export SERVER_TYPE=$SERVER_TYPE -echo "NODE_UID=$NODE_UID" -echo "NODE_TYPE=$NODE_TYPE" +echo "SERVER_UID=$SERVER_UID" +echo "SERVER_TYPE=$SERVER_TYPE" exec $DEBUG_CMD uvicorn $RELOAD --host $HOST --port $PORT --log-config=$APPDIR/grid/logging.yaml --log-level $LOG_LEVEL "$APP_MODULE" diff --git a/packages/grid/default.env b/packages/grid/default.env index 0aae09f1026..e3edfdc38df 100644 --- a/packages/grid/default.env +++ b/packages/grid/default.env @@ -1,8 +1,8 @@ #!/bin/bash -DOMAIN=localhost -NODE_NAME=default_node_name -NODE_TYPE=domain -FRONTEND_TARGET=grid-ui-development +DATASITE=localhost +SERVER_NAME=default_server_name +SERVER_TYPE=datasite +FRONTEND_TARGET=syft-ui-development PORT=80 HTTP_PORT=80 HTTPS_PORT=443 @@ -15,26 +15,26 @@ IGNORE_TLS_ERRORS=False TRAEFIK_TLS_CONF=./traefik/dynamic-configurations TRAEFIK_TLS_CERTS=./traefik/certs TRAEFIK_PUBLIC_NETWORK=traefik-public -TRAEFIK_TAG=grid.openmined.org +TRAEFIK_TAG=syft.openmined.org TRAEFIK_PUBLIC_TAG=traefik-public -STACK_NAME=grid-openmined-org -DOCKER_IMAGE_BACKEND=openmined/grid-backend -DOCKER_IMAGE_FRONTEND=openmined/grid-frontend -DOCKER_IMAGE_RATHOLE=openmined/grid-rathole +STACK_NAME=syft-openmined-org +DOCKER_IMAGE_BACKEND=openmined/syft-backend +DOCKER_IMAGE_FRONTEND=openmined/syft-frontend +DOCKER_IMAGE_RATHOLE=openmined/syft-rathole DOCKER_IMAGE_TRAEFIK=traefik TRAEFIK_VERSION=v2.11.0 REDIS_VERSION=6.2 RABBITMQ_VERSION=3 -DOCKER_IMAGE_SEAWEEDFS=openmined/grid-seaweedfs +DOCKER_IMAGE_SEAWEEDFS=openmined/syft-seaweedfs VERSION=latest VERSION_HASH=unknown STACK_API_KEY="" # Backend -BACKEND_CORS_ORIGINS='["http://localhost","http://localhost:4200","http://localhost:3000","http://localhost:8080","https://localhost","https://localhost:4200","https://localhost:3000","https://localhost:8080","http://dev.grid.openmined.org","https://stag.grid.openmined.org","https://grid.openmined.org"]' 
+BACKEND_CORS_ORIGINS='["http://localhost","http://localhost:4200","http://localhost:3000","http://localhost:8080","https://localhost","https://localhost:4200","https://localhost:3000","https://localhost:8080","http://dev.syft.openmined.org","https://stag.syft.openmined.org","https://syft.openmined.org"]' SEAWEED_MOUNT_PORT=4001 -PROJECT_NAME=grid +PROJECT_NAME=syft SECRET_KEY=changethis DEFAULT_ROOT_EMAIL=info@openmined.org DEFAULT_ROOT_PASSWORD=changethis @@ -44,9 +44,9 @@ SMTP_HOST= SMTP_USERNAME= SMTP_PASSWORD= EMAIL_SENDER= -SERVER_HOST="https://${DOMAIN}" +SERVER_HOST="https://${DATASITE}" NETWORK_CHECK_INTERVAL=60 -DOMAIN_CHECK_INTERVAL=60 +DATASITE_CHECK_INTERVAL=60 ASSOCIATION_TIMEOUT=10 USERS_OPEN_REGISTRATION=False DEV_MODE=False @@ -101,7 +101,7 @@ JAEGER_PORT=14268 # Syft SYFT_TUTORIAL_MODE=False ENABLE_WARNINGS=True -NODE_SIDE_TYPE=high +SERVER_SIDE_TYPE=high # Worker USE_BLOB_STORAGE=False @@ -110,7 +110,7 @@ USE_BLOB_STORAGE=False ENABLE_SIGNUP=False # Enclave Attestation -DOCKER_IMAGE_ENCLAVE_ATTESTATION=openmined/grid-enclave-attestation +DOCKER_IMAGE_ENCLAVE_ATTESTATION=openmined/syft-enclave-attestation # Rathole Config RATHOLE_PORT=2333 \ No newline at end of file diff --git a/packages/grid/devspace.yaml b/packages/grid/devspace.yaml index 4644e2d72e1..daef60f3857 100644 --- a/packages/grid/devspace.yaml +++ b/packages/grid/devspace.yaml @@ -22,13 +22,13 @@ pipelines: create_deployments --all vars: - DOCKER_IMAGE_BACKEND: openmined/grid-backend - DOCKER_IMAGE_FRONTEND: openmined/grid-frontend - DOCKER_IMAGE_SEAWEEDFS: openmined/grid-seaweedfs - DOCKER_IMAGE_RATHOLE: openmined/grid-rathole - DOCKER_IMAGE_ENCLAVE_ATTESTATION: openmined/grid-enclave-attestation + DOCKER_IMAGE_BACKEND: openmined/syft-backend + DOCKER_IMAGE_FRONTEND: openmined/syft-frontend + DOCKER_IMAGE_SEAWEEDFS: openmined/syft-seaweedfs + DOCKER_IMAGE_RATHOLE: openmined/syft-rathole + DOCKER_IMAGE_ENCLAVE_ATTESTATION: openmined/syft-enclave-attestation CONTAINER_REGISTRY: "docker.io" - VERSION: "0.8.7-beta.13" + VERSION: "0.8.8-beta.2" PLATFORM: $(uname -m | grep -q 'arm64' && echo "arm64" || echo "amd64") # This is a list of `images` that DevSpace can build for this project @@ -48,7 +48,7 @@ images: buildKit: args: ["--platform", "linux/${PLATFORM}"] dockerfile: ./frontend/frontend.dockerfile - target: "grid-ui-production" + target: "syft-ui-production" context: ./frontend tags: - dev-${DEVSPACE_TIMESTAMP} @@ -73,7 +73,7 @@ deployments: global: registry: ${CONTAINER_REGISTRY} version: dev-${DEVSPACE_TIMESTAMP} - node: {} + server: {} # anything that does not need templating should go in helm/examples/dev/base.yaml # or profile specific values files valuesFiles: @@ -117,23 +117,23 @@ dev: localPort: 3480 profiles: - - name: domain-low - description: "Deploy a low-side domain" + - name: datasite-low + description: "Deploy a low-side datasite" patches: - op: add - path: deployments.syft.helm.values.node + path: deployments.syft.helm.values.server value: side: low - - name: migrated-domain - description: "Deploy a migrated domain" + - name: migrated-datasite + description: "Deploy a migrated datasite" patches: - op: add path: deployments.syft.helm.valuesFiles value: ./helm/examples/dev/migration.yaml - - name: domain-tunnel - description: "Deploy a domain with tunneling enabled" + - name: datasite-tunnel + description: "Deploy a datasite with tunneling enabled" patches: # enable rathole image - op: add @@ -150,10 +150,10 @@ profiles: # use rathole client-specific chart values - op: add path: 
deployments.syft.helm.valuesFiles - value: ./helm/examples/dev/domain.tunnel.yaml + value: ./helm/examples/dev/datasite.tunnel.yaml - name: gateway - description: "Deploy a Gateway Node with tunnel enabled" + description: "Deploy a Gateway Server with tunnel enabled" patches: # enable rathole image - op: add @@ -201,7 +201,7 @@ profiles: value: 2334:2333 - name: gcp - description: "Deploy a high-side domain on GCP" + description: "Deploy a high-side datasite on GCP" patches: - op: replace path: deployments.syft.helm.valuesFiles @@ -209,7 +209,7 @@ profiles: - ./helm/examples/gcp/gcp.high.yaml - name: gcp-low - description: "Deploy a low-side domain on GCP" + description: "Deploy a low-side datasite on GCP" patches: - op: replace path: deployments.syft.helm.valuesFiles @@ -217,15 +217,15 @@ profiles: - ./helm/examples/gcp/gcp.low.yaml - name: azure - description: "Deploy a high-side domain on AKS" + description: "Deploy a high-side datasite on AKS" patches: - op: replace path: deployments.syft.helm.valuesFiles value: - ./helm/examples/azure/azure.high.yaml - - name: enclave - description: "Deploy an enclave node" + - name: enclave-cpu + description: "Deploy an CPU enclave server" patches: # enable image build for enclave-attestation - op: add @@ -245,10 +245,54 @@ profiles: enclave-attestation: sync: - path: ./enclave/attestation/server:/app/server - # use gateway-specific chart values + # use enclave-specific chart values + - op: add + path: deployments.syft.helm.valuesFiles + value: ./helm/examples/dev/enclave-cpu.yaml + # Port Re-Mapping + - op: replace + path: dev.mongo.ports[0].port + value: 27019:27017 + - op: replace + path: dev.backend.ports[0].port + value: 5680:5678 + - op: replace + path: dev.backend.containers.backend-container.ssh.localPort + value: 3482 + - op: replace + path: dev.seaweedfs.ports + value: + - port: "9334:9333" # admin + - port: "8889:8888" # filer + - port: "8334:8333" # S3 + - port: "4002:4001" # mount api + + # TODO: Can we de-duplicate it? 
+ - name: enclave-gpu + description: "Deploy an GPU enclave node" + patches: + # enable image build for enclave-attestation + - op: add + path: images + value: + enclave-attestation: + image: "${CONTAINER_REGISTRY}/${DOCKER_IMAGE_ENCLAVE_ATTESTATION}" + buildKit: + args: ["--platform", "linux/amd64"] + dockerfile: ./enclave/attestation/attestation.dockerfile + context: ./enclave/attestation + tags: + - dev-${DEVSPACE_TIMESTAMP} + - op: add + path: dev.backend.containers + value: + enclave-attestation: + sync: + - path: ./enclave/attestation/server:/app/server + # use enclave-specific chart values - op: add path: deployments.syft.helm.valuesFiles - value: ./helm/examples/dev/enclave.yaml + value: ./helm/examples/dev/enclave-gpu.yaml # Port Re-Mapping - op: replace path: dev.mongo.ports[0].port diff --git a/packages/grid/enclave/attestation/enclave-development.md b/packages/grid/enclave/attestation/enclave-development.md index 5a9a2526e54..9e1563504b6 100644 --- a/packages/grid/enclave/attestation/enclave-development.md +++ b/packages/grid/enclave/attestation/enclave-development.md @@ -95,9 +95,9 @@ from nv_attestation_sdk import attestation NRAS_URL="https://nras.attestation.nvidia.com/v1/attest/gpu" client = attestation.Attestation() -client.set_name("thisNode1") +client.set_name("thisServer1") client.set_nonce("931d8dd0add203ac3d8b4fbde75e115278eefcdceac5b87671a748f32364dfcb") -print ("[RemoteGPUTest] node name :", client.get_name()) +print ("[RemoteGPUTest] server name :", client.get_name()) client.add_verifier(attestation.Devices.GPU, attestation.Environment.REMOTE, NRAS_URL, "") client.attest() diff --git a/packages/grid/enclave/attestation/server/gpu_attestation.py b/packages/grid/enclave/attestation/server/gpu_attestation.py index 299ad663aeb..38eccc8a6df 100644 --- a/packages/grid/enclave/attestation/server/gpu_attestation.py +++ b/packages/grid/enclave/attestation/server/gpu_attestation.py @@ -23,10 +23,10 @@ def extract_token(captured_value: str) -> str: def attest_gpu() -> tuple[str, str]: # Fetch report from Nvidia Attestation SDK - client = attestation.Attestation("Attestation Node") + client = attestation.Attestation("Attestation Server") # TODO: Add the ability to generate nonce later. - logger.info("[RemoteGPUTest] node name : {}", client.get_name()) + logger.info("[RemoteGPUTest] server name : {}", client.get_name()) client.add_verifier( attestation.Devices.GPU, attestation.Environment.REMOTE, NRAS_URL, "" diff --git a/packages/grid/frontend/README.md b/packages/grid/frontend/README.md index e912f1dd172..6684d9fcba0 100644 --- a/packages/grid/frontend/README.md +++ b/packages/grid/frontend/README.md @@ -1,12 +1,12 @@ -# PyGrid UI +# Syft UI -The PyGrid UI is the user interface that allows data owners to manage their -**deployed** PyGrid domains and networks. +The Syft UI is the user interface that allows data owners to manage their +**deployed** Syft datasites and gateways. 
## Installation ```bash -cd /packages/grid/frontend +cd /packages/grid/frontend pnpm install ``` diff --git a/packages/grid/frontend/frontend.dockerfile b/packages/grid/frontend/frontend.dockerfile index d003cd86ca2..7dd46d4a888 100644 --- a/packages/grid/frontend/frontend.dockerfile +++ b/packages/grid/frontend/frontend.dockerfile @@ -22,16 +22,16 @@ FROM base AS dependencies RUN --mount=type=cache,id=pnpm,target=/pnpm/store pnpm install --frozen-lockfile -FROM dependencies as grid-ui-tests +FROM dependencies as syft-ui-tests COPY vite.config.ts ./ COPY ./tests ./tests COPY ./src/ ./src CMD pnpm test:unit -FROM dependencies as grid-ui-development +FROM dependencies as syft-ui-development -ENV NODE_ENV=development +ENV SERVER_ENV=development COPY . . CMD pnpm dev @@ -41,9 +41,9 @@ FROM dependencies AS builder COPY . . RUN pnpm build -FROM base AS grid-ui-production +FROM base AS syft-ui-production -ENV NODE_ENV=production +ENV SERVER_ENV=production COPY --from=dependencies /app/node_modules ./node_modules COPY --from=builder /app ./ diff --git a/packages/grid/frontend/package.json b/packages/grid/frontend/package.json index 15fddc2a6d2..8ffa52384fc 100644 --- a/packages/grid/frontend/package.json +++ b/packages/grid/frontend/package.json @@ -1,6 +1,6 @@ { - "name": "pygrid-ui", - "version": "0.8.7-beta.13", + "name": "syft-ui", + "version": "0.8.8-beta.2", "private": true, "scripts": { "dev": "pnpm i && vite dev --host --port 80", diff --git a/packages/grid/frontend/src/_routes/(app)/account/+page.svelte b/packages/grid/frontend/src/_routes/(app)/account/+page.svelte index 10780f2c120..f9771b7d562 100644 --- a/packages/grid/frontend/src/_routes/(app)/account/+page.svelte +++ b/packages/grid/frontend/src/_routes/(app)/account/+page.svelte @@ -6,7 +6,7 @@ import GreenCheck from '$lib/components/icons/GreenCheck.svelte'; import Info from '$lib/components/icons/Info.svelte'; - let showDeleteNodeModal = false; + let showDeleteServerModal = false; let showDeleteAccountModal = false; let showDeleteConfirmModal = false; @@ -23,7 +23,7 @@ >

- Your profile information is public-facing information that other users and node owners can
+ Your profile information is public-facing information that other users and server owners can
see.

@@ -85,8 +85,8 @@

When you delete your user account all information relating to you will be deleted as well - as any permissions and requests. If you are the domain owner the domain node will be - deleted as well and will be closed to all users. To transfer ownership of a domain node + as any permissions and requests. If you are the datasite owner the datasite server will be + deleted as well and will be closed to all users. To transfer ownership of a datasite server before deleting your account you can follow the instructions here

@@ -127,27 +127,27 @@
{/if}
- {#if showDeleteNodeModal}
+ {#if showDeleteServerModal}
-

Are you sure you want to delete your node?

+

Are you sure you want to delete your server?

- Because you are the domain owner, the domain node along with all uploaded datasets, user + Because you are the datasite owner, the datasite server along with all uploaded datasets, user accounts, and requests will be deleted. All network memberships will also be removed. If you - would like to keep this domain node but no longer want to be an owner press “cancel” and - follow the instructions here to transfer ownership of your domain node. + would like to keep this datasite server but no longer want to be an owner press “cancel” and + follow the instructions here to transfer ownership of your datasite server.

Cancel
@@ -165,7 +165,7 @@

To help us improve future experiences could you share with us any frustrations or
- suggestions you have with or for the PyGridUI Platform?
+ suggestions you have with or for the Syft UI Platform?

diff --git a/packages/grid/frontend/src/_routes/(app)/config/+page.svelte b/packages/grid/frontend/src/_routes/(app)/config/+page.svelte index 03469b6e007..80b79071e63 100644 --- a/packages/grid/frontend/src/_routes/(app)/config/+page.svelte +++ b/packages/grid/frontend/src/_routes/(app)/config/+page.svelte @@ -19,7 +19,7 @@ let current_user = data.current_user let tabs = [ - { label: "Domain", id: "tab1" }, + { label: "Datasite", id: "tab1" }, // { label: 'Connection', id: 'tab2' } // { label: 'Permissions', id: 'tab3' }, ] @@ -27,7 +27,7 @@ let currentTab = tabs[0].id $: profileInformation = [ - { label: "Domain name", id: "domain_name", value: metadata?.name }, + { label: "Datasite name", id: "datasite_name", value: metadata?.name }, { label: "Organization", id: "organization", @@ -44,7 +44,7 @@ const handleUpdate = async () => { let newMetadata = { - name: openModal === "domain_name" ? name : null, + name: openModal === "datasite_name" ? name : null, organization: openModal === "organization" ? organization : null, description: openModal === "description" ? description : null, } @@ -69,7 +69,7 @@ $: current_user = data.current_user $: metadata = data.metadata - $: domainInitials = getInitials(metadata?.name) + $: datasiteInitials = getInitials(metadata?.name) $: userInitials = getInitials(current_user?.name) @@ -82,7 +82,7 @@ Loading... {:else} - +
@@ -121,7 +121,7 @@

ID#:

- {metadata.node_id}
+ {metadata.server_id}

Deployed on:

@@ -141,11 +141,11 @@
Turned off

- When remote execution is turned on for the domain, it means that you are allowing + When remote execution is turned on for the datasite, it means that you are allowing PySyft to execute code submitted by users as is against the real private data instead of the mock data when a request is approved. If this is a third-party user please review the function and policy code carefully before approving this request. You can - turn off "Remote Execution" for your domain by going to your "Domain Settings" or + turn off "Remote Execution" for your datasite by going to your "Datasite Settings" or clicking the link below.
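A rough sketch of the flow this copy describes, seen from the data-owner side and using the client calls shown in the notebook changes above; the exact request-review helpers may differ between Syft versions, and the URL and credentials here are placeholders:

```python
import syft as sy

# Placeholder URL/credentials; assumes a datasite is already running locally.
datasite_client = sy.login(
    url="http://localhost:8080", email="info@openmined.org", password="changethis"
)

# Review pending code-execution requests before any of them run on real data.
for request in datasite_client.requests:
    print(request.code)   # inspect the submitted function and policy first
    # request.approve()   # approving is what allows execution against real data
```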

@@ -167,8 +167,8 @@
- {#if openModal === "domain_name"} - + {#if openModal === "datasite_name"} + {:else if openModal === "organization"} { diff --git a/packages/grid/frontend/src/_routes/(app)/users/[slug]/+page.svelte b/packages/grid/frontend/src/_routes/(app)/users/[slug]/+page.svelte index f893eece0e9..a261ab5ebae 100644 --- a/packages/grid/frontend/src/_routes/(app)/users/[slug]/+page.svelte +++ b/packages/grid/frontend/src/_routes/(app)/users/[slug]/+page.svelte @@ -3,7 +3,7 @@ import Badge from "$lib/components/Badge.svelte" import CaretLeft from "$lib/components/icons/CaretLeft.svelte" import { getInitials, getUserRole } from "$lib/utils" - import type { UserView } from "../../../../types/domain/users" + import type { UserView } from "../../../../types/datasite/users" import type { PageData } from "./$types" export let data: PageData diff --git a/packages/grid/frontend/src/_routes/(auth)/login/+page.server.ts b/packages/grid/frontend/src/_routes/(auth)/login/+page.server.ts index 55ae3091828..38c9793b054 100644 --- a/packages/grid/frontend/src/_routes/(auth)/login/+page.server.ts +++ b/packages/grid/frontend/src/_routes/(auth)/login/+page.server.ts @@ -8,7 +8,7 @@ export const actions: Actions = { default: async ({ cookies, request }) => { try { const data = await request.formData() - const { email, password, node_id } = get_form_data_values(data) + const { email, password, server_id } = get_form_data_values(data) if ( !email || @@ -16,14 +16,14 @@ export const actions: Actions = { typeof email !== "string" || typeof password !== "string" ) { - throw new Error(`invalid form data: email:${email} node:${node_id}`) + throw new Error(`invalid form data: email:${email} server:${server_id}`) } const { signing_key, uid } = await login({ email, password }) const cookie_user = { uid, - node_id, + server_id, } cookies.set( diff --git a/packages/grid/frontend/src/_routes/(auth)/login/+page.svelte b/packages/grid/frontend/src/_routes/(auth)/login/+page.svelte index 1b6ddaf5b69..26b2438d9f2 100644 --- a/packages/grid/frontend/src/_routes/(auth)/login/+page.svelte +++ b/packages/grid/frontend/src/_routes/(auth)/login/+page.svelte @@ -2,10 +2,10 @@ import { enhance } from "$app/forms" import Button from "$lib/components/Button.svelte" import Modal from "$lib/components/Modal.svelte" - import DomainMetadataPanel from "$lib/components/authentication/DomainMetadataPanel.svelte" + import DatasiteMetadataPanel from "$lib/components/authentication/DatasiteMetadataPanel.svelte" import Input from "$lib/components/Input.svelte" - import DomainOnlineIndicator from "$lib/components/DomainOnlineIndicator.svelte" - import type { DomainOnlineStatus } from "../../../types/domain/onlineIndicator" + import DatasiteOnlineIndicator from "$lib/components/DatasiteOnlineIndicator.svelte" + import type { DatasiteOnlineStatus } from "../../../types/datasite/onlineIndicator" import type { PageData, ActionData } from "./$types" export let data: PageData @@ -13,13 +13,13 @@ const { metadata } = data - let status: DomainOnlineStatus = "online" + let status: DatasiteOnlineStatus = "online"
- +
@@ -38,16 +38,16 @@

{/if}
- +

{#if status === "pending"}
Checking connection
{:else}
- Domain {status}
+ Datasite {status}
{/if}

- + import { enhance } from "$app/forms" import Button from "$lib/components/Button.svelte" - import DomainMetadataPanel from "$lib/components/authentication/DomainMetadataPanel.svelte" + import DatasiteMetadataPanel from "$lib/components/authentication/DatasiteMetadataPanel.svelte" import Input from "$lib/components/Input.svelte" import Modal from "$lib/components/Modal.svelte" import type { ActionData, PageData } from "./$types" @@ -15,7 +15,7 @@
- +
{ try { const { page_size, page_index } = get_url_page_params(url) - const { signing_key, node_id } = unload_cookies(cookies) + const { signing_key, server_id } = unload_cookies(cookies) const dataset = await jsSyftCall({ path: "dataset.get_all", payload: { page_size, page_index }, - node_id, + server_id, signing_key, }) diff --git a/packages/grid/frontend/src/_routes/_syft_api/metadata/+server.ts b/packages/grid/frontend/src/_routes/_syft_api/metadata/+server.ts index c774c41899b..366cf4dba1a 100644 --- a/packages/grid/frontend/src/_routes/_syft_api/metadata/+server.ts +++ b/packages/grid/frontend/src/_routes/_syft_api/metadata/+server.ts @@ -17,9 +17,9 @@ export const GET: RequestHandler = async () => { highest_version: metadata_raw?.highest_version, lowest_version: metadata_raw?.lowest_version, name: metadata_raw?.name, - node_id: metadata_raw?.id?.value, - node_side: metadata_raw?.node_side_type, - node_type: metadata_raw?.node_type?.value, + server_id: metadata_raw?.id?.value, + server_side: metadata_raw?.server_side_type, + server_type: metadata_raw?.server_type?.value, organization: metadata_raw?.organization, signup_enabled: metadata_raw?.signup_enabled, syft_version: metadata_raw?.syft_version, @@ -32,7 +32,7 @@ export const GET: RequestHandler = async () => { export const PATCH: RequestHandler = async ({ cookies, request }) => { try { - const { signing_key, node_id } = unload_cookies(cookies) + const { signing_key, server_id } = unload_cookies(cookies) const metadata = await request.json() @@ -41,10 +41,10 @@ export const PATCH: RequestHandler = async ({ cookies, request }) => { payload: { settings: { ...metadata, - fqn: "syft.service.settings.settings.NodeSettingsUpdate", + fqn: "syft.service.settings.settings.ServerSettingsUpdate", }, }, - node_id, + server_id, signing_key, }) diff --git a/packages/grid/frontend/src/_routes/_syft_api/users/+server.ts b/packages/grid/frontend/src/_routes/_syft_api/users/+server.ts index f9351d70286..f757af854cf 100644 --- a/packages/grid/frontend/src/_routes/_syft_api/users/+server.ts +++ b/packages/grid/frontend/src/_routes/_syft_api/users/+server.ts @@ -8,12 +8,12 @@ export const GET: RequestHandler = async ({ cookies, url }) => { const page_size = parseInt(url.searchParams.get("page_size") || "10") const page_index = parseInt(url.searchParams.get("page_index") || "0") - const { signing_key, node_id } = unload_cookies(cookies) + const { signing_key, server_id } = unload_cookies(cookies) const users = await jsSyftCall({ path: "user.get_all", payload: { page_size, page_index }, - node_id, + server_id, signing_key, }) @@ -26,7 +26,7 @@ export const GET: RequestHandler = async ({ cookies, url }) => { export const POST: RequestHandler = async ({ cookies, request }) => { try { - const { signing_key, node_id } = unload_cookies(cookies) + const { signing_key, server_id } = unload_cookies(cookies) const new_user = await request.json() @@ -35,7 +35,7 @@ export const POST: RequestHandler = async ({ cookies, request }) => { payload: { user_create: { ...new_user, fqn: "syft.service.user.user.UserCreate" }, }, - node_id, + server_id, signing_key, }) diff --git a/packages/grid/frontend/src/_routes/_syft_api/users/[uid]/+server.ts b/packages/grid/frontend/src/_routes/_syft_api/users/[uid]/+server.ts index 491d773808a..2523a2776db 100644 --- a/packages/grid/frontend/src/_routes/_syft_api/users/[uid]/+server.ts +++ b/packages/grid/frontend/src/_routes/_syft_api/users/[uid]/+server.ts @@ -7,12 +7,12 @@ export const GET: RequestHandler = async ({ cookies, params 
}) => { try { const requested_uid = params.uid - const { signing_key, node_id } = unload_cookies(cookies) + const { signing_key, server_id } = unload_cookies(cookies) const user = await jsSyftCall({ path: "user.view", payload: { uid: { value: requested_uid, fqn: "syft.types.uid.UID" } }, - node_id, + server_id, signing_key, }) @@ -34,7 +34,7 @@ export const GET: RequestHandler = async ({ cookies, params }) => { export const PUT: RequestHandler = async ({ cookies, params, request }) => { try { - const { signing_key, node_id } = unload_cookies(cookies) + const { signing_key, server_id } = unload_cookies(cookies) const requested_uid = params.uid const body = await request.json() @@ -54,7 +54,7 @@ export const PUT: RequestHandler = async ({ cookies, params, request }) => { fqn: "syft.service.user.user.UserUpdate", }, }, - node_id, + server_id, signing_key, }) diff --git a/packages/grid/frontend/src/_routes/_syft_api/users/search/+server.ts b/packages/grid/frontend/src/_routes/_syft_api/users/search/+server.ts index 19678b3c292..7ec0322ff51 100644 --- a/packages/grid/frontend/src/_routes/_syft_api/users/search/+server.ts +++ b/packages/grid/frontend/src/_routes/_syft_api/users/search/+server.ts @@ -9,7 +9,7 @@ export const GET: RequestHandler = async ({ cookies, url }) => { const page_index = parseInt(url.searchParams.get("page_index") || "0") const name = url.searchParams.get("name") - const { signing_key, node_id } = unload_cookies(cookies) + const { signing_key, server_id } = unload_cookies(cookies) const users = await jsSyftCall({ path: "user.search", @@ -21,7 +21,7 @@ export const GET: RequestHandler = async ({ cookies, url }) => { page_index, page_size, }, - node_id, + server_id, signing_key, }) diff --git a/packages/grid/frontend/src/app.html b/packages/grid/frontend/src/app.html index 5ee015f0cf1..e1934dec53b 100644 --- a/packages/grid/frontend/src/app.html +++ b/packages/grid/frontend/src/app.html @@ -5,7 +5,7 @@ %sveltekit.head% - PyGrid + Syft UI
%sveltekit.body%
diff --git a/packages/grid/frontend/src/lib/api/syft_api.ts b/packages/grid/frontend/src/lib/api/syft_api.ts index f7ddec743f1..2261bf99455 100644 --- a/packages/grid/frontend/src/lib/api/syft_api.ts +++ b/packages/grid/frontend/src/lib/api/syft_api.ts @@ -7,7 +7,7 @@ const SYFT_MSG_URL = `${API_BASE_URL}/api_call` const FQN = { VERIFY_KEY: "nacl.signing.VerifyKey", - SYFT_VERIFY_KEY: "syft.node.credentials.SyftVerifyKey", + SYFT_VERIFY_KEY: "syft.server.credentials.SyftVerifyKey", UID: "syft.types.uid.UID", SYFT_API_CALL: "syft.client.api.SyftAPICall", SIGNED_SYFT_API_CALL: "syft.client.api.SignedSyftAPICall", @@ -17,7 +17,7 @@ interface SyftAPICall { path: string payload: Record signing_key: string | Uint8Array - node_id: string + server_id: string } const getKeyPair = (signing_key: string | Uint8Array) => @@ -28,12 +28,12 @@ const getKeyPair = (signing_key: string | Uint8Array) => const getSignedMessage = ({ path, payload, - node_id, + server_id, signing_key, }: Omit & { signing_key: Uint8Array }) => { const syftAPIPayload = { path, - node_uid: { value: node_id, fqn: FQN.UID }, + server_uid: { value: server_id, fqn: FQN.UID }, args: [], kwargs: new Map(Object.entries(payload)), fqn: FQN.SYFT_API_CALL, @@ -87,13 +87,13 @@ export const jsSyftCall = async ({ path, payload, signing_key, - node_id, + server_id, }: SyftAPICall): Promise => { const key = getKeyPair(signing_key) const signedMessage = getSignedMessage({ path, payload, - node_id, + server_id, signing_key: key, }) return await send(signedMessage) diff --git a/packages/grid/frontend/src/lib/client/jsPyClassMap.js b/packages/grid/frontend/src/lib/client/jsPyClassMap.js index 91bce7c022d..2cc055b774f 100644 --- a/packages/grid/frontend/src/lib/client/jsPyClassMap.js +++ b/packages/grid/frontend/src/lib/client/jsPyClassMap.js @@ -3,7 +3,7 @@ import { UUID } from "$lib/client/objects/uid.ts" import { APICall } from "$lib/client/messages/syftMessage.ts" export const classMapping = { - "syft.node.credentials.SyftVerifyKey": SyftVerifyKey, + "syft.server.credentials.SyftVerifyKey": SyftVerifyKey, "nacl.signing.VerifyKey": VerifyKey, "syft.types.uid.UID": UUID, "syft.client.api.SyftAPICall": APICall, diff --git a/packages/grid/frontend/src/lib/client/jsclient/jsClient.svelte b/packages/grid/frontend/src/lib/client/jsclient/jsClient.svelte index c68c8364ead..3baa59e61db 100644 --- a/packages/grid/frontend/src/lib/client/jsclient/jsClient.svelte +++ b/packages/grid/frontend/src/lib/client/jsclient/jsClient.svelte @@ -25,7 +25,7 @@ } this.userId = window.localStorage.getItem('id'); - this.nodeId = window.localStorage.getItem('nodeId'); + this.serverId = window.localStorage.getItem('serverId'); return this; } @@ -150,7 +150,7 @@ // Create a new object called 'newMetadata' with updated fields and a new property called 'fqn' with a value. const updateMetadata = { ...newMetadata, - fqn: 'syft.service.metadata.node_metadata.NodeMetadataUpdate' + fqn: 'syft.service.metadata.server_metadata.ServerMetadataUpdate' }; // Create a new object called 'reqFields' with one property: 'metadata', which is set to 'updateMetadata'. @@ -249,7 +249,7 @@ * @throws {Error} An error is thrown if the message signature and public key don't match. */ async send(args, kwargs, path) { - const signedCall = new APICall(this.nodeId, path, args, kwargs).sign(this.key, this.serde); + const signedCall = new APICall(this.serverId, path, args, kwargs).sign(this.key, this.serde); try { // Make a POST request to the server with the signed call. 
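For reference, the message that `getSignedMessage` signs after this rename carries roughly the following fields before serde serialization; a sketch with placeholder values (path and kwargs taken from the users endpoint above):

```python
# Placeholder values only; real calls are built and serialized by syft's serde,
# then signed with the user's nacl signing key as in getSignedMessage.
example_api_call = {
    "fqn": "syft.client.api.SyftAPICall",
    "path": "user.get_all",
    "server_uid": {  # previously node_uid
        "value": "<datasite server UID>",
        "fqn": "syft.types.uid.UID",
    },
    "args": [],
    "kwargs": {"page_size": 10, "page_index": 0},
}
```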
diff --git a/packages/grid/frontend/src/lib/client/messages/syftMessage.ts b/packages/grid/frontend/src/lib/client/messages/syftMessage.ts index 1dbb2a82285..24d5dabd56c 100644 --- a/packages/grid/frontend/src/lib/client/messages/syftMessage.ts +++ b/packages/grid/frontend/src/lib/client/messages/syftMessage.ts @@ -30,14 +30,14 @@ export class SignedAPICall { } export class APICall { - node_uid: UUID + server_uid: UUID path: string args: object kwargs: object blocking: boolean constructor(id, path, args, kwargs, blocking = true) { - this.node_uid = new UUID(id) + this.server_uid = new UUID(id) this.path = path this.args = args if (kwargs) { diff --git a/packages/grid/frontend/src/lib/client/objects/key.ts b/packages/grid/frontend/src/lib/client/objects/key.ts index a340db2a9c0..964d0debb5b 100644 --- a/packages/grid/frontend/src/lib/client/objects/key.ts +++ b/packages/grid/frontend/src/lib/client/objects/key.ts @@ -14,6 +14,6 @@ export class SyftVerifyKey { constructor(verify_key: Uint8Array) { this.verify_key = new VerifyKey(verify_key) - this.fqn = "syft.node.credentials.SyftVerifyKey" + this.fqn = "syft.server.credentials.SyftVerifyKey" } } diff --git a/packages/grid/frontend/src/lib/client/objects/userCode.ts b/packages/grid/frontend/src/lib/client/objects/userCode.ts index 733487eb1ed..49e4e96cc6e 100644 --- a/packages/grid/frontend/src/lib/client/objects/userCode.ts +++ b/packages/grid/frontend/src/lib/client/objects/userCode.ts @@ -39,7 +39,7 @@ interface InputObject { class InputPolicy { id: UUID inputs: Map - node_id?: UUID + server_id?: UUID /** * Creates a new instance of InputPolicy. diff --git a/packages/grid/frontend/src/lib/components/AccountSettings.svelte b/packages/grid/frontend/src/lib/components/AccountSettings.svelte index 0b08334b172..9aa88814ef4 100644 --- a/packages/grid/frontend/src/lib/components/AccountSettings.svelte +++ b/packages/grid/frontend/src/lib/components/AccountSettings.svelte @@ -296,8 +296,8 @@

When you delete your user account all information relating to you will be deleted as well as - any permissions and requests. If you are the domain owner the domain node will be deleted as - well and will be closed to all users. To transfer ownership of a domain node before deleting + any permissions and requests. If you are the datasite owner the datasite server will be deleted as + well and will be closed to all users. To transfer ownership of a datasite server before deleting your account you can follow the instructions here.

diff --git a/packages/grid/frontend/src/lib/components/Datasets/DatasetListItem.svelte b/packages/grid/frontend/src/lib/components/Datasets/DatasetListItem.svelte index 606ebebce17..b4b1da788e0 100644 --- a/packages/grid/frontend/src/lib/components/Datasets/DatasetListItem.svelte +++ b/packages/grid/frontend/src/lib/components/Datasets/DatasetListItem.svelte @@ -3,7 +3,7 @@ import TableIcon from '$lib/components/icons/TableIcon.svelte'; import Tooltip from '$lib/components/Tooltip.svelte'; import TooltipText from '../TooltipText.svelte'; - import type { Dataset } from '../../../types/domain/dataset'; + import type { Dataset } from '../../../types/datasite/dataset'; export let dataset: Dataset; diff --git a/packages/grid/frontend/src/lib/components/Datasets/DatasetNoneFound.svelte b/packages/grid/frontend/src/lib/components/Datasets/DatasetNoneFound.svelte index fe02b87ef03..21977baa9ea 100644 --- a/packages/grid/frontend/src/lib/components/Datasets/DatasetNoneFound.svelte +++ b/packages/grid/frontend/src/lib/components/Datasets/DatasetNoneFound.svelte @@ -1,7 +1,7 @@

No Datasets Uploaded

- To begin adding datasets to this domain node please click the "+" button and follow + To begin adding datasets to this datasite server please click the "+" button and follow instructions.

diff --git a/packages/grid/frontend/src/lib/components/DomainOnlineIndicator.svelte b/packages/grid/frontend/src/lib/components/DatasiteOnlineIndicator.svelte similarity index 68% rename from packages/grid/frontend/src/lib/components/DomainOnlineIndicator.svelte rename to packages/grid/frontend/src/lib/components/DatasiteOnlineIndicator.svelte index 1232283ac6c..b534a481813 100644 --- a/packages/grid/frontend/src/lib/components/DomainOnlineIndicator.svelte +++ b/packages/grid/frontend/src/lib/components/DatasiteOnlineIndicator.svelte @@ -1,6 +1,6 @@
diff --git a/packages/grid/frontend/src/lib/components/Navigation/SideNavDOHandbook.svelte b/packages/grid/frontend/src/lib/components/Navigation/SideNavDOHandbook.svelte index f72f9dddd45..081139b9cd8 100644 --- a/packages/grid/frontend/src/lib/components/Navigation/SideNavDOHandbook.svelte +++ b/packages/grid/frontend/src/lib/components/Navigation/SideNavDOHandbook.svelte @@ -27,7 +27,7 @@

Data Owner handbook

Check out the data owner handbook to learn more tips & tricks about how to manage your - domain node. + datasite server.

diff --git a/packages/grid/frontend/src/lib/components/Navigation/TopNav.svelte b/packages/grid/frontend/src/lib/components/Navigation/TopNav.svelte index 9e127766cf3..1a3dc762b4e 100644 --- a/packages/grid/frontend/src/lib/components/Navigation/TopNav.svelte +++ b/packages/grid/frontend/src/lib/components/Navigation/TopNav.svelte @@ -13,7 +13,7 @@ { href: "", icon: BellIcon, disabled: true }, ] - $: domainInitials = getInitials(metadata.name) + $: datasiteInitials = getInitials(metadata.name) $: userInitials = getInitials(user.name) @@ -21,7 +21,7 @@ class="w-full py-2 px-6 flex items-center justify-between tablet:justify-end shadow-topbar-1 tablet:shadow-none" >
- +
    {#each links as link} diff --git a/packages/grid/frontend/src/lib/components/OnboardingModal.svelte b/packages/grid/frontend/src/lib/components/OnboardingModal.svelte index 8e381c18630..9f2fe27871f 100644 --- a/packages/grid/frontend/src/lib/components/OnboardingModal.svelte +++ b/packages/grid/frontend/src/lib/components/OnboardingModal.svelte @@ -7,7 +7,7 @@ import Progress from "$lib/components/Progress.svelte" import Input from "$lib/components/Input.svelte" import ButtonGhost from "$lib/components/ButtonGhost.svelte" - import NodeIcon from "$lib/components/icons/NodeIcon.svelte" + import ServerIcon from "$lib/components/icons/ServerIcon.svelte" import CheckIcon from "$lib/components/icons/CheckIcon.svelte" export let metadata @@ -23,15 +23,15 @@ website: "", } - let domainSettings = { + let datasiteSettings = { name: "", description: "", organization: "", on_board: false, } - let checkRequiredDomainFields = () => { - return domainSettings.name !== "" ? true : false + let checkRequiredDatasiteFields = () => { + return datasiteSettings.name !== "" ? true : false } let checkRequiredUserFields = () => { @@ -49,7 +49,7 @@ headers: { "Content-Type": "application/json", }, - body: JSON.stringify(domainSettings), + body: JSON.stringify(datasiteSettings), }) await fetch(`/_syft_api/users/${userSettings.id}`, { @@ -86,7 +86,7 @@ website: "", } - domainSettings = { + datasiteSettings = { name: "", description: "", organization: "", @@ -105,13 +105,13 @@ Welcome to PyGrid

- Welcome to PyGrid Admin! + Welcome to Syft UI!

Step 1 of 4

@@ -125,7 +125,7 @@

- Congratulations on logging into {metadata?.name ?? ""} node. This wizard + Congratulations on logging into {metadata?.name ?? ""} server. This wizard will help get you started in setting up your user account. You can skip this wizard by pressing “Cancel” below. You can edit any of your responses later by going to "Account Settings" indicated by your avatar in the top @@ -148,10 +148,10 @@

- +
- Domain Profile
+ Datasite Profile

Step 2 of 4

@@ -182,28 +182,28 @@

- Let's begin by describing some basic information about this domain - node. This information will be shown to outside users to help them - find and understand what your domain offers. + Let's begin by describing some basic information about this datasite + server. This information will be shown to outside users to help them + find and understand what your datasite offers.

@@ -232,7 +232,7 @@

- Now that we have described our domain, let's update our password and + Now that we have described our datasite, let's update our password and describe some basic information about ourselves for our "User Profile". User profile information will be shown to teammates and collaborators when working on studies together. diff --git a/packages/grid/frontend/src/lib/components/Users/UserCreateModal.svelte b/packages/grid/frontend/src/lib/components/Users/UserCreateModal.svelte index 77ebcc3fb70..05c4a8ee18c 100644 --- a/packages/grid/frontend/src/lib/components/Users/UserCreateModal.svelte +++ b/packages/grid/frontend/src/lib/components/Users/UserCreateModal.svelte @@ -16,19 +16,19 @@ roleId: 2, title: "Data Scientist", description: - "This role is for users who will be performing computations on your datasets. They may be users you know directly or those who found your domain through search and discovery. By default this user can see a list of your datasets and can request to get results.", + "This role is for users who will be performing computations on your datasets. They may be users you know directly or those who found your datasite through search and discovery. By default this user can see a list of your datasets and can request to get results.", }, { roleId: 32, title: "Data Owner", description: - "This role is for users on your team who will be responsible for uploading data to the domain.", + "This role is for users on your team who will be responsible for uploading data to the datasite.", }, { roleId: 128, title: "Admin", description: - "This role is for users who will help you manage your node. This should be users you trust as they will be users who will have full permissions to the node.", + "This role is for users who will help you manage your server. This should be users you trust as they will be users who will have full permissions to the server.", }, ] @@ -105,7 +105,7 @@

To begin let's select the role this user is going to have on your - domain node. + datasite server.

{#each cardsContent as { title, description, roleId }} @@ -221,8 +221,8 @@ Welcome to {metadata?.name} {name},
- You are formally invited you to join {metadata?.name} Domain. Below is - your login credentials and the URL to the domain. After logging in you + You are formally invited to join {metadata?.name} Datasite. Below are + your login credentials and the URL to the datasite. After logging in you will be prompted to customize your account.

{href} diff --git a/packages/grid/frontend/src/lib/components/Users/UserListItem.svelte b/packages/grid/frontend/src/lib/components/Users/UserListItem.svelte index b5ca6737d6f..ad34edaf1cb 100644 --- a/packages/grid/frontend/src/lib/components/Users/UserListItem.svelte +++ b/packages/grid/frontend/src/lib/components/Users/UserListItem.svelte @@ -1,6 +1,6 @@
{#if metadata}
- +
- +

{metadata.name}

{#if metadata.organization} @@ -29,15 +29,15 @@

Id#:

- {metadata.node_id} + {metadata.server_id}

Side:

- {metadata.node_side}
+ {metadata.server_side}

Type:

- {metadata.node_type}
+ {metadata.server_type}

{/if} diff --git a/packages/grid/frontend/src/lib/components/authentication/Nav.svelte b/packages/grid/frontend/src/lib/components/authentication/Nav.svelte index da591ae26f7..5b2ed997dfa 100644 --- a/packages/grid/frontend/src/lib/components/authentication/Nav.svelte +++ b/packages/grid/frontend/src/lib/components/authentication/Nav.svelte @@ -9,7 +9,7 @@
PyGrid logo {#if version} diff --git a/packages/grid/frontend/src/lib/components/icons/NodeIcon.svelte b/packages/grid/frontend/src/lib/components/icons/ServerIcon.svelte similarity index 100% rename from packages/grid/frontend/src/lib/components/icons/NodeIcon.svelte rename to packages/grid/frontend/src/lib/components/icons/ServerIcon.svelte diff --git a/packages/grid/frontend/src/lib/utils.ts b/packages/grid/frontend/src/lib/utils.ts index 2ffed30dec5..cd8dcc89b52 100644 --- a/packages/grid/frontend/src/lib/utils.ts +++ b/packages/grid/frontend/src/lib/utils.ts @@ -1,4 +1,4 @@ -import { ServiceRoles } from "../types/domain/users" +import { ServiceRoles } from "../types/datasite/users" import { COOKIES } from "./constants" import type { CookieSerializeOptions } from "cookie" import type { Cookies } from "@sveltejs/kit" @@ -33,7 +33,7 @@ export function getInitials(name: string) { export function logout() { window.localStorage.removeItem("id") - window.localStorage.removeItem("nodeId") + window.localStorage.removeItem("serverId") window.localStorage.removeItem("key") } @@ -51,7 +51,7 @@ export const default_cookie_config: CookieSerializeOptions = { interface CookieData { uid: string - node_id: string + server_id: string signing_key: string } diff --git a/packages/grid/frontend/src/routes/+page.svelte b/packages/grid/frontend/src/routes/+page.svelte index 3637adb7bd1..da607f84d1b 100644 --- a/packages/grid/frontend/src/routes/+page.svelte +++ b/packages/grid/frontend/src/routes/+page.svelte @@ -15,5 +15,5 @@
- PyGrid + Syft UI
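A small sketch (assumed usage, not part of the diff) of client-side session code picking up the renames from the utils.ts hunk above: the persisted server identifier moves from the "nodeId" to the "serverId" localStorage key, and cookie data carries server_id instead of node_id. The storage keys and field names come from this PR; everything else is illustrative.

    // Hypothetical session bootstrap after the rename.
    const serverId = window.localStorage.getItem("serverId") ?? ""

    const cookieData = {
      uid: window.localStorage.getItem("id") ?? "",
      server_id: serverId,              // renamed from node_id
      signing_key: window.localStorage.getItem("key") ?? "",
    }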
diff --git a/packages/grid/frontend/src/routes/[...all]/+page.svelte b/packages/grid/frontend/src/routes/[...all]/+page.svelte index b1bb3b1cb01..0994b12a178 100644 --- a/packages/grid/frontend/src/routes/[...all]/+page.svelte +++ b/packages/grid/frontend/src/routes/[...all]/+page.svelte @@ -1,6 +1,6 @@ [hunk body lost in extraction]

[Added hunk, file header lost in extraction: defines HTML repr templates — model_card_html, model_repr_template, and asset_repr_template — as f-strings composed from JS_DOWNLOAD_FONTS, CSS_CODE, header_line_html, ${model_card}, and ${attrs_html}; the embedded HTML markup did not survive extraction.]
+ +""" +) diff --git a/packages/syft/src/syft/service/model/model_service.py b/packages/syft/src/syft/service/model/model_service.py index d8a0a727d14..85710297d2c 100644 --- a/packages/syft/src/syft/service/model/model_service.py +++ b/packages/syft/src/syft/service/model/model_service.py @@ -62,7 +62,7 @@ def _paginate_model_collection( @instrument -@serializable() +@serializable(canonical_name="ModelService", version=1) class ModelService(AbstractService): store: DocumentStore stash: ModelStash @@ -82,8 +82,6 @@ def add( """Add a model""" model = model.to(Model, context=context) - print("got model", model) - result = self.stash.set( context.credentials, model, @@ -96,8 +94,8 @@ def add( if result.is_err(): return SyftError(message=str(result.err())) return SyftSuccess( - message=f"Model uploaded to '{context.node.name}'. " - f"To see the models uploaded by a client on this node, use command `[your_client].models`" + message=f"Model uploaded to '{context.server.name}'. " + f"To see the models uploaded by a client on this server, use command `[your_client].models`" ) @service_method( diff --git a/packages/syft/src/syft/service/model/model_stash.py b/packages/syft/src/syft/service/model/model_stash.py index 36fcf8bf446..01003ed97e9 100644 --- a/packages/syft/src/syft/service/model/model_stash.py +++ b/packages/syft/src/syft/service/model/model_stash.py @@ -2,8 +2,8 @@ from result import Result # relative -from ...node.credentials import SyftVerifyKey from ...serde.serializable import serializable +from ...server.credentials import SyftVerifyKey from ...store.document_store import BaseUIDStoreStash from ...store.document_store import DocumentStore from ...store.document_store import PartitionKey @@ -16,7 +16,7 @@ @instrument -@serializable() +@serializable(canonical_name="ModelStash", version=1) class ModelStash(BaseUIDStoreStash): object_type = Model settings: PartitionSettings = PartitionSettings( diff --git a/packages/syft/src/syft/service/network/association_request.py b/packages/syft/src/syft/service/network/association_request.py index 2771eb208b7..e6bc86be29b 100644 --- a/packages/syft/src/syft/service/network/association_request.py +++ b/packages/syft/src/syft/service/network/association_request.py @@ -11,25 +11,37 @@ from ...client.client import SyftClient from ...serde.serializable import serializable from ...types.syft_object import SYFT_OBJECT_VERSION_1 +from ...types.syft_object import SYFT_OBJECT_VERSION_2 from ..context import ChangeContext from ..request.request import Change from ..response import SyftError from ..response import SyftSuccess -from .node_peer import NodePeer +from .routes import ServerRoute +from .server_peer import ServerPeer @serializable() -class AssociationRequestChange(Change): +class AssociationRequestChangeV1(Change): __canonical_name__ = "AssociationRequestChange" __version__ = SYFT_OBJECT_VERSION_1 - remote_peer: NodePeer + self_server_route: ServerRoute + remote_peer: ServerPeer + challenge: bytes + + +@serializable() +class AssociationRequestChange(Change): + __canonical_name__ = "AssociationRequestChange" + __version__ = SYFT_OBJECT_VERSION_2 + + remote_peer: ServerPeer - __repr_attrs__ = ["self_node_route", "remote_peer"] + __repr_attrs__ = ["remote_peer"] def _run( self, context: ChangeContext, apply: bool - ) -> Result[tuple[bytes, NodePeer], SyftError]: + ) -> Result[tuple[bytes, ServerPeer], SyftError]: """ Executes the association request. @@ -38,7 +50,7 @@ def _run( apply (bool): A flag indicating whether to apply the association request. 
Returns: - Result[tuple[bytes, NodePeer], SyftError]: The result of the association request. + Result[tuple[bytes, ServerPeer], SyftError]: The result of the association request. """ # relative from .network_service import NetworkService @@ -52,7 +64,7 @@ def _run( # Get the network service service_ctx = context.to_service_ctx() network_service = cast( - NetworkService, service_ctx.node.get_service(NetworkService) + NetworkService, service_ctx.server.get_service(NetworkService) ) network_stash = network_service.stash @@ -67,7 +79,7 @@ def _run( if add_rtunnel_route: network_service.set_reverse_tunnel_config( context=context, - remote_node_peer=self.remote_peer, + remote_server_peer=self.remote_peer, ) else: # Pinging the remote peer to verify the connection @@ -103,7 +115,7 @@ def _run( # Adding the remote peer to the network stash result = network_stash.create_or_update_peer( - service_ctx.node.verify_key, self.remote_peer + service_ctx.server.verify_key, self.remote_peer ) if result.is_err(): diff --git a/packages/syft/src/syft/service/network/network_service.py b/packages/syft/src/syft/service/network/network_service.py index 0c65f15f8be..7659d0e1f07 100644 --- a/packages/syft/src/syft/service/network/network_service.py +++ b/packages/syft/src/syft/service/network/network_service.py @@ -10,17 +10,17 @@ from result import Result # relative -from ...abstract_node import NodeType +from ...abstract_server import ServerType from ...client.client import HTTPConnection from ...client.client import PythonConnection from ...client.client import SyftClient -from ...node.credentials import SyftVerifyKey -from ...node.worker_settings import WorkerSettings from ...serde.serializable import serializable -from ...service.settings.settings import NodeSettings +from ...server.credentials import SyftVerifyKey +from ...server.worker_settings import WorkerSettings +from ...service.settings.settings import ServerSettings from ...store.document_store import DocumentStore from ...store.document_store import PartitionKey -from ...types.grid_url import GridURL +from ...types.server_url import ServerURL from ...types.transforms import TransformContext from ...types.transforms import keep from ...types.transforms import make_set_default @@ -33,7 +33,7 @@ from ...util.util import prompt_warning_message from ...util.util import str_to_bool from ..context import AuthedServiceContext -from ..metadata.node_metadata import NodeMetadata +from ..metadata.server_metadata import ServerMetadata from ..request.request import Request from ..request.request import RequestStatus from ..request.request import SubmitRequest @@ -50,18 +50,18 @@ from ..warnings import CRUDWarning from .association_request import AssociationRequestChange from .network_stash import NetworkStash -from .node_peer import NodePeer -from .node_peer import NodePeerUpdate from .reverse_tunnel_service import ReverseTunnelService -from .routes import HTTPNodeRoute -from .routes import NodeRoute -from .routes import NodeRouteType -from .routes import PythonNodeRoute +from .routes import HTTPServerRoute +from .routes import PythonServerRoute +from .routes import ServerRoute +from .routes import ServerRouteType +from .server_peer import ServerPeer +from .server_peer import ServerPeerUpdate logger = logging.getLogger(__name__) VerifyKeyPartitionKey = PartitionKey(key="verify_key", type_=SyftVerifyKey) -NodeTypePartitionKey = PartitionKey(key="node_type", type_=NodeType) +ServerTypePartitionKey = PartitionKey(key="server_type", type_=ServerType) 
OrderByNamePartitionKey = PartitionKey(key="name", type_=str) REVERSE_TUNNEL_ENABLED = "REVERSE_TUNNEL_ENABLED" @@ -71,15 +71,15 @@ def reverse_tunnel_enabled() -> bool: return str_to_bool(get_env(REVERSE_TUNNEL_ENABLED, "false")) -@serializable() -class NodePeerAssociationStatus(Enum): +@serializable(canonical_name="ServerPeerAssociationStatus", version=1) +class ServerPeerAssociationStatus(Enum): PEER_ASSOCIATED = "PEER_ASSOCIATED" PEER_ASSOCIATION_PENDING = "PEER_ASSOCIATION_PENDING" PEER_NOT_FOUND = "PEER_NOT_FOUND" @instrument -@serializable() +@serializable(canonical_name="NetworkService", version=1) class NetworkService(AbstractService): store: DocumentStore stash: NetworkStash @@ -99,47 +99,47 @@ def __init__(self, store: DocumentStore) -> None: def exchange_credentials_with( self, context: AuthedServiceContext, - self_node_route: NodeRoute, - remote_node_route: NodeRoute, - remote_node_verify_key: SyftVerifyKey, + self_server_route: ServerRoute, + remote_server_route: ServerRoute, + remote_server_verify_key: SyftVerifyKey, reverse_tunnel: bool = False, ) -> Request | SyftSuccess | SyftError: """ - Exchange Route With Another Node. If there is a pending association request, return it + Exchange Route With Another Server. If there is a pending association request, return it """ # Step 1: Validate the Route - self_node_peer = self_node_route.validate_with_context(context=context) + self_server_peer = self_server_route.validate_with_context(context=context) if reverse_tunnel and not reverse_tunnel_enabled(): - return SyftError(message="Reverse tunneling is not enabled on this node.") + return SyftError(message="Reverse tunneling is not enabled on this server.") elif reverse_tunnel: - _rtunnel_route = self_node_peer.node_routes[-1] + _rtunnel_route = self_server_peer.server_routes[-1] _rtunnel_route.rtunnel_token = generate_token() - _rtunnel_route.host_or_ip = f"{self_node_peer.name}.syft.local" - self_node_peer.node_routes[-1] = _rtunnel_route + _rtunnel_route.host_or_ip = f"{self_server_peer.name}.syft.local" + self_server_peer.server_routes[-1] = _rtunnel_route - if isinstance(self_node_peer, SyftError): - return self_node_peer + if isinstance(self_server_peer, SyftError): + return self_server_peer - # Step 2: Send the Node Peer to the remote node + # Step 2: Send the Server Peer to the remote server # Also give them their own to validate that it belongs to them # random challenge prevents replay attacks - remote_client: SyftClient = remote_node_route.client_with_context( + remote_client: SyftClient = remote_server_route.client_with_context( context=context ) - remote_node_peer = NodePeer.from_client(remote_client) + remote_server_peer = ServerPeer.from_client(remote_client) - # Step 3: Check remotely if the self node already exists as a peer + # Step 3: Check remotely if the self server already exists as a peer # Update the peer if it exists, otherwise add it - remote_self_node_peer = remote_client.api.services.network.get_peer_by_name( - name=self_node_peer.name + remote_self_server_peer = remote_client.api.services.network.get_peer_by_name( + name=self_server_peer.name ) association_request_approved = True - if isinstance(remote_self_node_peer, NodePeer): - updated_peer = NodePeerUpdate( - id=self_node_peer.id, node_routes=self_node_peer.node_routes + if isinstance(remote_self_server_peer, ServerPeer): + updated_peer = ServerPeerUpdate( + id=self_server_peer.id, server_routes=self_server_peer.server_routes ) result = remote_client.api.services.network.update_peer( 
peer_update=updated_peer @@ -152,15 +152,15 @@ def exchange_credentials_with( message=f"Failed to add peer information on remote client : {remote_client.id}" ) - # If peer does not exist, ask the remote client to add this node - # (represented by `self_node_peer`) as a peer - if remote_self_node_peer is None: + # If peer does not exist, ask the remote client to add this server + # (represented by `self_server_peer`) as a peer + if remote_self_server_peer is None: random_challenge = secrets.token_bytes(16) remote_res = remote_client.api.services.network.add_peer( - peer=self_node_peer, + peer=self_server_peer, challenge=random_challenge, - self_node_route=remote_node_route, - verify_key=remote_node_verify_key, + self_server_route=remote_server_route, + verify_key=remote_server_verify_key, ) if isinstance(remote_res, SyftError): @@ -172,12 +172,12 @@ def exchange_credentials_with( # Step 4: Save the remote peer for later result = self.stash.create_or_update_peer( - context.node.verify_key, - remote_node_peer, + context.server.verify_key, + remote_server_peer, ) if result.is_err(): logging.error( - f"Failed to save peer: {remote_node_peer}. Error: {result.err()}" + f"Failed to save peer: {remote_server_peer}. Error: {result.err()}" ) return SyftError(message="Failed to update route information.") @@ -185,8 +185,8 @@ def exchange_credentials_with( if reverse_tunnel and reverse_tunnel_enabled(): self.set_reverse_tunnel_config( context=context, - self_node_peer=self_node_peer, - remote_node_peer=remote_node_peer, + self_server_peer=self_server_peer, + remote_server_peer=remote_server_peer, ) return ( @@ -199,27 +199,27 @@ def exchange_credentials_with( def add_peer( self, context: AuthedServiceContext, - peer: NodePeer, + peer: ServerPeer, ) -> Request | SyftSuccess | SyftError: - """Add a Network Node Peer. Called by a remote node to add - itself as a peer for the current node. + """Add a Network Server Peer. Called by a remote server to add + itself as a peer for the current server. 
""" - # check if the peer already is a node peer - existing_peer_res = self.stash.get_by_uid(context.node.verify_key, peer.id) + # check if the peer already is a server peer + existing_peer_res = self.stash.get_by_uid(context.server.verify_key, peer.id) if existing_peer_res.is_err(): return SyftError( message=f"Failed to query peer from stash: {existing_peer_res.err()}" ) - if isinstance(existing_peer := existing_peer_res.ok(), NodePeer): + if isinstance(existing_peer := existing_peer_res.ok(), ServerPeer): msg = [ - f"The peer '{peer.name}' is already associated with '{context.node.name}'" + f"The peer '{peer.name}' is already associated with '{context.server.name}'" ] if existing_peer != peer: result = self.stash.create_or_update_peer( - context.node.verify_key, + context.server.verify_key, peer, ) msg.append("Peer information change detected.") @@ -250,13 +250,15 @@ def add_peer( changes=[association_request_change], requesting_user_verify_key=context.credentials, ) - request_submit_method = context.node.get_service_method(RequestService.submit) + request_submit_method = context.server.get_service_method(RequestService.submit) request = request_submit_method(context, submit_request) if ( isinstance(request, Request) - and context.node.settings.association_request_auto_approval + and context.server.settings.association_request_auto_approval ): - request_apply_method = context.node.get_service_method(RequestService.apply) + request_apply_method = context.server.get_service_method( + RequestService.apply + ) return request_apply_method(context, uid=request.id) return request @@ -267,16 +269,16 @@ def add_peer( def challenge_nonce( self, context: AuthedServiceContext, challenge: bytes ) -> bytes | SyftError: - """To check authenticity of the remote node""" + """To check authenticity of the remote server""" - # # Only the root user can ping the node to check its state - # if context.node.verify_key != context.credentials: + # # Only the root user can ping the server to check its state + # if context.server.verify_key != context.credentials: # return SyftError(message=("Only the root user can access ping endpoint")) # this way they can match up who we are with who they think we are # Sending a signed messages for the peer to verify - challenge_signature = context.node.signing_key.signing_key.sign( + challenge_signature = context.server.signing_key.signing_key.sign( challenge ).signature @@ -284,9 +286,9 @@ def challenge_nonce( @service_method(path="network.ping", name="ping", roles=GUEST_ROLE_LEVEL) def ping(self, context: AuthedServiceContext) -> SyftSuccess: - """To check liveness of the remote node""" + """To check liveness of the remote server""" return SyftSuccess( - message=f"Reply from remote node:{context.node.name}-<{context.node.id}>" + message=f"Reply from remote server:{context.server.name}-<{context.server.id}>" ) @service_method( @@ -297,10 +299,10 @@ def ping(self, context: AuthedServiceContext) -> SyftSuccess: def ping_peer( self, context: AuthedServiceContext, verify_key: SyftVerifyKey ) -> SyftSuccess | SyftError: - """Ping a remote node by its verify key""" + """Ping a remote server by its verify key""" remote_peer = self.stash.get_by_verify_key( - credentials=context.node.verify_key, verify_key=verify_key + credentials=context.server.verify_key, verify_key=verify_key ) if remote_peer.is_err(): return SyftError( @@ -330,16 +332,16 @@ def ping_peer( ) def check_peer_association( self, context: AuthedServiceContext, peer_id: UID - ) -> NodePeerAssociationStatus | SyftError: + 
) -> ServerPeerAssociationStatus | SyftError: """Check if a peer exists in the network stash""" - # get the node peer for the given sender peer_id - peer = self.stash.get_by_uid(context.node.verify_key, peer_id) + # get the server peer for the given sender peer_id + peer = self.stash.get_by_uid(context.server.verify_key, peer_id) if err := peer.is_err(): return SyftError(message=f"Failed to query peer from stash. Err: {err}") - if isinstance(peer.ok(), NodePeer): - return NodePeerAssociationStatus.PEER_ASSOCIATED + if isinstance(peer.ok(), ServerPeer): + return ServerPeerAssociationStatus.PEER_ASSOCIATED if peer.ok() is None: # peer is either pending or not found association_requests: list[Request] = ( @@ -351,20 +353,20 @@ def check_peer_association( association_requests and association_requests[-1].status == RequestStatus.PENDING ): - return NodePeerAssociationStatus.PEER_ASSOCIATION_PENDING + return ServerPeerAssociationStatus.PEER_ASSOCIATION_PENDING - return NodePeerAssociationStatus.PEER_NOT_FOUND + return ServerPeerAssociationStatus.PEER_NOT_FOUND @service_method( path="network.get_all_peers", name="get_all_peers", roles=GUEST_ROLE_LEVEL ) def get_all_peers( self, context: AuthedServiceContext - ) -> list[NodePeer] | SyftError: + ) -> list[ServerPeer] | SyftError: """Get all Peers""" result = self.stash.get_all( - credentials=context.node.verify_key, + credentials=context.server.verify_key, order_by=OrderByNamePartitionKey, ) if result.is_ok(): @@ -377,11 +379,11 @@ def get_all_peers( ) def get_peer_by_name( self, context: AuthedServiceContext, name: str - ) -> NodePeer | None | SyftError: + ) -> ServerPeer | None | SyftError: """Get Peer by Name""" result = self.stash.get_by_name( - credentials=context.node.verify_key, + credentials=context.server.verify_key, name=name, ) if result.is_ok(): @@ -395,11 +397,11 @@ def get_peer_by_name( roles=GUEST_ROLE_LEVEL, ) def get_peers_by_type( - self, context: AuthedServiceContext, node_type: NodeType - ) -> list[NodePeer] | SyftError: - result = self.stash.get_by_node_type( - credentials=context.node.verify_key, - node_type=node_type, + self, context: AuthedServiceContext, server_type: ServerType + ) -> list[ServerPeer] | SyftError: + result = self.stash.get_by_server_type( + credentials=context.server.verify_key, + server_type=server_type, ) if result.is_err(): @@ -416,12 +418,12 @@ def get_peers_by_type( def update_peer( self, context: AuthedServiceContext, - peer_update: NodePeerUpdate, + peer_update: ServerPeerUpdate, ) -> SyftSuccess | SyftError: - # try setting all fields of NodePeerUpdate according to NodePeer + # try setting all fields of ServerPeerUpdate according to ServerPeer result = self.stash.update( - credentials=context.node.verify_key, + credentials=context.server.verify_key, peer_update=peer_update, ) if result.is_err(): @@ -431,7 +433,7 @@ def update_peer( peer = result.ok() - self.set_reverse_tunnel_config(context=context, remote_node_peer=peer) + self.set_reverse_tunnel_config(context=context, remote_server_peer=peer) return SyftSuccess( message=f"Peer '{result.ok().name}' information successfully updated." 
) @@ -439,28 +441,28 @@ def update_peer( def set_reverse_tunnel_config( self, context: AuthedServiceContext, - remote_node_peer: NodePeer, - self_node_peer: NodePeer | None = None, + remote_server_peer: ServerPeer, + self_server_peer: ServerPeer | None = None, ) -> None: - node_type = cast(NodeType, context.node.node_type) - if node_type.value == NodeType.GATEWAY.value: - rtunnel_route = remote_node_peer.get_rtunnel_route() + server_type = cast(ServerType, context.server.server_type) + if server_type.value == ServerType.GATEWAY.value: + rtunnel_route = remote_server_peer.get_rtunnel_route() ( - self.rtunnel_service.set_server_config(remote_node_peer) + self.rtunnel_service.set_server_config(remote_server_peer) if rtunnel_route else None ) else: - self_node_peer = ( - context.node.settings.to(NodePeer) - if self_node_peer is None - else self_node_peer + self_server_peer = ( + context.server.settings.to(ServerPeer) + if self_server_peer is None + else self_server_peer ) - rtunnel_route = self_node_peer.get_rtunnel_route() + rtunnel_route = self_server_peer.get_rtunnel_route() ( self.rtunnel_service.set_client_config( - self_node_peer=self_node_peer, - remote_node_route=remote_node_peer.pick_highest_priority_route(), + self_server_peer=self_server_peer, + remote_server_route=remote_server_peer.pick_highest_priority_route(), ) if rtunnel_route else None @@ -474,16 +476,16 @@ def set_reverse_tunnel_config( def delete_peer_by_id( self, context: AuthedServiceContext, uid: UID ) -> SyftSuccess | SyftError: - """Delete Node Peer""" + """Delete Server Peer""" retrieve_result = self.stash.get_by_uid(context.credentials, uid) if err := retrieve_result.is_err(): return SyftError( message=f"Failed to retrieve peer with UID {uid}: {retrieve_result.err()}." ) - peer_to_delete = cast(NodePeer, retrieve_result.ok()) + peer_to_delete = cast(ServerPeer, retrieve_result.ok()) - node_side_type = cast(NodeType, context.node.node_type) - if node_side_type.value == NodeType.GATEWAY.value: + server_side_type = cast(ServerType, context.server.server_type) + if server_side_type.value == ServerType.GATEWAY.value: rtunnel_route = peer_to_delete.get_rtunnel_route() ( self.rtunnel_service.clear_server_config(peer_to_delete) @@ -491,7 +493,7 @@ def delete_peer_by_id( else None ) - # TODO: Handle the case when peer is deleted from domain node + # TODO: Handle the case when peer is deleted from datasite server result = self.stash.delete_by_uid(context.credentials, uid) if err := result.is_err(): @@ -501,7 +503,7 @@ def delete_peer_by_id( context=context, peer_id=uid ) for request in association_requests: - request_delete_method = context.node.get_service_method( + request_delete_method = context.server.get_service_method( RequestService.delete_by_uid ) res = request_delete_method(context, request.id) @@ -509,29 +511,29 @@ def delete_peer_by_id( return res # TODO: Notify the peer (either by email or by other form of notifications) # that it has been deleted from the network - return SyftSuccess(message=f"Node Peer with id {uid} deleted.") + return SyftSuccess(message=f"Server Peer with id {uid} deleted.") @service_method(path="network.add_route_on_peer", name="add_route_on_peer") def add_route_on_peer( self, context: AuthedServiceContext, - peer: NodePeer, - route: NodeRoute, + peer: ServerPeer, + route: ServerRoute, ) -> SyftSuccess | SyftError: """ Add or update the route information on the remote peer. Args: context (AuthedServiceContext): The authentication context. 
- peer (NodePeer): The peer representing the remote node. - route (NodeRoute): The route to be added. + peer (ServerPeer): The peer representing the remote server. + route (ServerRoute): The route to be added. Returns: SyftSuccess | SyftError: A success message if the route is verified, otherwise an error message. """ - # creates a client on the remote node based on the credentials - # of the current node's client + # creates a client on the remote server based on the credentials + # of the current server's client remote_client = peer.client_with_context(context=context) if remote_client.is_err(): return SyftError( @@ -539,7 +541,7 @@ def add_route_on_peer( f"{peer.id}. Error: {remote_client.err()}" ) remote_client = remote_client.ok() - # ask the remote node to add the route to the self node + # ask the remote server to add the route to the self server result = remote_client.api.services.network.add_route( peer_verify_key=context.credentials, route=route, @@ -552,16 +554,16 @@ def add_route( self, context: AuthedServiceContext, peer_verify_key: SyftVerifyKey, - route: NodeRoute, + route: ServerRoute, called_by_peer: bool = False, ) -> SyftSuccess | SyftError: """ Add a route to the peer. If the route already exists, update its priority. Args: - context (AuthedServiceContext): The authentication context of the remote node. - peer_verify_key (SyftVerifyKey): The verify key of the remote node peer. - route (NodeRoute): The route to be added. + context (AuthedServiceContext): The authentication context of the remote server. + peer_verify_key (SyftVerifyKey): The verify key of the remote server peer. + route (ServerRoute): The route to be added. called_by_peer (bool): The flag to indicate that it's called by a remote peer. Returns: @@ -576,49 +578,49 @@ def add_route( ) ) # get the full peer object from the store to update its routes - remote_node_peer: NodePeer | SyftError = ( - self._get_remote_node_peer_by_verify_key(context, peer_verify_key) + remote_server_peer: ServerPeer | SyftError = ( + self._get_remote_server_peer_by_verify_key(context, peer_verify_key) ) - if isinstance(remote_node_peer, SyftError): - return remote_node_peer + if isinstance(remote_server_peer, SyftError): + return remote_server_peer # add and update the priority for the peer - if route in remote_node_peer.node_routes: + if route in remote_server_peer.server_routes: return SyftSuccess( - message=f"The route already exists between '{context.node.name}' and " - f"peer '{remote_node_peer.name}'." + message=f"The route already exists between '{context.server.name}' and " + f"peer '{remote_server_peer.name}'." 
) - remote_node_peer.update_route(route=route) + remote_server_peer.update_route(route=route) # update the peer in the store with the updated routes - peer_update = NodePeerUpdate( - id=remote_node_peer.id, node_routes=remote_node_peer.node_routes + peer_update = ServerPeerUpdate( + id=remote_server_peer.id, server_routes=remote_server_peer.server_routes ) result = self.stash.update( - credentials=context.node.verify_key, + credentials=context.server.verify_key, peer_update=peer_update, ) if result.is_err(): return SyftError(message=str(result.err())) return SyftSuccess( message=f"New route ({str(route)}) with id '{route.id}' " - f"to peer {remote_node_peer.node_type.value} '{remote_node_peer.name}' " - f"was added for {str(context.node.node_type)} '{context.node.name}'" + f"to peer {remote_server_peer.server_type.value} '{remote_server_peer.name}' " + f"was added for {str(context.server.server_type)} '{context.server.name}'" ) @service_method(path="network.delete_route_on_peer", name="delete_route_on_peer") def delete_route_on_peer( self, context: AuthedServiceContext, - peer: NodePeer, - route: NodeRoute, + peer: ServerPeer, + route: ServerRoute, ) -> SyftSuccess | SyftError | SyftInfo: """ Delete the route on the remote peer. Args: context (AuthedServiceContext): The authentication context for the service. - peer (NodePeer): The peer for which the route will be deleted. - route (NodeRoute): The route to be deleted. + peer (ServerPeer): The peer for which the route will be deleted. + route (ServerRoute): The route to be deleted. Returns: SyftSuccess: If the route is successfully deleted. @@ -626,8 +628,8 @@ def delete_route_on_peer( SyftInfo: If there is only one route left for the peer and the admin chose not to remove it """ - # creates a client on the remote node based on the credentials - # of the current node's client + # creates a client on the remote server based on the credentials + # of the current server's client remote_client = peer.client_with_context(context=context) if remote_client.is_err(): return SyftError( @@ -635,7 +637,7 @@ def delete_route_on_peer( f"{peer.id}. Error: {remote_client.err()}" ) remote_client = remote_client.ok() - # ask the remote node to delete the route to the self node, + # ask the remote server to delete the route to the self server, result = remote_client.api.services.network.delete_route( peer_verify_key=context.credentials, route=route, @@ -650,7 +652,7 @@ def delete_route( self, context: AuthedServiceContext, peer_verify_key: SyftVerifyKey, - route: NodeRoute | None = None, + route: ServerRoute | None = None, called_by_peer: bool = False, ) -> SyftSuccess | SyftError | SyftInfo: """ @@ -660,8 +662,8 @@ def delete_route( Args: context (AuthedServiceContext): The authentication context for the service. - peer_verify_key (SyftVerifyKey): The verify key of the remote node peer. - route (NodeRoute): The route to be deleted. + peer_verify_key (SyftVerifyKey): The verify key of the remote server peer. + route (ServerRoute): The route to be deleted. called_by_peer (bool): The flag to indicate that it's called by a remote peer. 
Returns: @@ -679,18 +681,18 @@ def delete_route( ) ) - remote_node_peer: NodePeer | SyftError = ( - self._get_remote_node_peer_by_verify_key( + remote_server_peer: ServerPeer | SyftError = ( + self._get_remote_server_peer_by_verify_key( context=context, peer_verify_key=peer_verify_key ) ) - if len(remote_node_peer.node_routes) == 1: + if len(remote_server_peer.server_routes) == 1: warning_message = ( f"There is only one route left to peer " - f"{remote_node_peer.node_type.value} '{remote_node_peer.name}'. " + f"{remote_server_peer.server_type.value} '{remote_server_peer.name}'. " f"Removing this route will remove the peer for " - f"{str(context.node.node_type)} '{context.node.name}'." + f"{str(context.server.server_type)} '{context.server.name}'." ) response: bool = prompt_warning_message( message=warning_message, @@ -698,40 +700,40 @@ def delete_route( ) if not response: return SyftInfo( - message=f"The last route to {remote_node_peer.node_type.value} " - f"'{remote_node_peer.name}' with id " - f"'{remote_node_peer.node_routes[0].id}' was not deleted." + message=f"The last route to {remote_server_peer.server_type.value} " + f"'{remote_server_peer.name}' with id " + f"'{remote_server_peer.server_routes[0].id}' was not deleted." ) - result = remote_node_peer.delete_route(route=route) + result = remote_server_peer.delete_route(route=route) return_message = ( f"Route '{str(route)}' to peer " - f"{remote_node_peer.node_type.value} '{remote_node_peer.name}' " - f"was deleted for {str(context.node.node_type)} '{context.node.name}'." + f"{remote_server_peer.server_type.value} '{remote_server_peer.name}' " + f"was deleted for {str(context.server.server_type)} '{context.server.name}'." ) if isinstance(result, SyftError): return result - if len(remote_node_peer.node_routes) == 0: + if len(remote_server_peer.server_routes) == 0: # remove the peer # TODO: should we do this as we are deleting the peer with a guest role level? result = self.stash.delete_by_uid( - credentials=context.node.verify_key, uid=remote_node_peer.id + credentials=context.server.verify_key, uid=remote_server_peer.id ) if isinstance(result, SyftError): return result return_message += ( f" There is no routes left to connect to peer " - f"{remote_node_peer.node_type.value} '{remote_node_peer.name}', so it is deleted for " - f"{str(context.node.node_type)} '{context.node.name}'." + f"{remote_server_peer.server_type.value} '{remote_server_peer.name}', so it is deleted for " + f"{str(context.server.server_type)} '{context.server.name}'." ) else: # update the peer with the route removed - peer_update = NodePeerUpdate( - id=remote_node_peer.id, node_routes=remote_node_peer.node_routes + peer_update = ServerPeerUpdate( + id=remote_server_peer.id, server_routes=remote_server_peer.server_routes ) result = self.stash.update( - credentials=context.node.verify_key, peer_update=peer_update + credentials=context.server.verify_key, peer_update=peer_update ) if result.is_err(): return SyftError(message=str(result.err())) @@ -745,8 +747,8 @@ def delete_route( def update_route_priority_on_peer( self, context: AuthedServiceContext, - peer: NodePeer, - route: NodeRoute, + peer: ServerPeer, + route: ServerRoute, priority: int | None = None, ) -> SyftSuccess | SyftError: """ @@ -754,8 +756,8 @@ def update_route_priority_on_peer( Args: context (AuthedServiceContext): The authentication context. - peer (NodePeer): The peer representing the remote node. - route (NodeRoute): The route to be added. + peer (ServerPeer): The peer representing the remote server. 
+ route (ServerRoute): The route to be added. priority (int | None): The new priority value for the route. If not provided, it will be assigned the highest priority among all peers @@ -763,8 +765,8 @@ def update_route_priority_on_peer( SyftSuccess | SyftError: A success message if the route is verified, otherwise an error message. """ - # creates a client on the remote node based on the credentials - # of the current node's client + # creates a client on the remote server based on the credentials + # of the current server's client remote_client = peer.client_with_context(context=context) if remote_client.is_err(): return SyftError( @@ -789,7 +791,7 @@ def update_route_priority( self, context: AuthedServiceContext, peer_verify_key: SyftVerifyKey, - route: NodeRoute, + route: ServerRoute, priority: int | None = None, called_by_peer: bool = False, ) -> SyftSuccess | SyftError: @@ -799,7 +801,7 @@ def update_route_priority( Args: context (AuthedServiceContext): The authentication context for the service. peer_verify_key (SyftVerifyKey): The verify key of the peer whose route priority needs to be updated. - route (NodeRoute): The route for which the priority needs to be updated. + route (ServerRoute): The route for which the priority needs to be updated. priority (int | None): The new priority value for the route. If not provided, it will be assigned the highest priority among all peers @@ -814,54 +816,54 @@ def update_route_priority( ) ) # get the full peer object from the store to update its routes - remote_node_peer: NodePeer | SyftError = ( - self._get_remote_node_peer_by_verify_key(context, peer_verify_key) + remote_server_peer: ServerPeer | SyftError = ( + self._get_remote_server_peer_by_verify_key(context, peer_verify_key) ) - if isinstance(remote_node_peer, SyftError): - return remote_node_peer + if isinstance(remote_server_peer, SyftError): + return remote_server_peer # update the route's priority for the peer - updated_node_route: NodeRouteType | SyftError = ( - remote_node_peer.update_existed_route_priority( + updated_server_route: ServerRouteType | SyftError = ( + remote_server_peer.update_existed_route_priority( route=route, priority=priority ) ) - if isinstance(updated_node_route, SyftError): - return updated_node_route - new_priority: int = updated_node_route.priority + if isinstance(updated_server_route, SyftError): + return updated_server_route + new_priority: int = updated_server_route.priority # update the peer in the store - peer_update = NodePeerUpdate( - id=remote_node_peer.id, node_routes=remote_node_peer.node_routes + peer_update = ServerPeerUpdate( + id=remote_server_peer.id, server_routes=remote_server_peer.server_routes ) - result = self.stash.update(context.node.verify_key, peer_update) + result = self.stash.update(context.server.verify_key, peer_update) if result.is_err(): return SyftError(message=str(result.err())) return SyftSuccess( message=f"Route {route.id}'s priority updated to " - f"{new_priority} for peer {remote_node_peer.name}" + f"{new_priority} for peer {remote_server_peer.name}" ) - def _get_remote_node_peer_by_verify_key( + def _get_remote_server_peer_by_verify_key( self, context: AuthedServiceContext, peer_verify_key: SyftVerifyKey - ) -> NodePeer | SyftError: + ) -> ServerPeer | SyftError: """ - Helper function to get the full node peer object from t + Helper function to get the full server peer object from t he stash using its verify key """ - remote_node_peer: Result[NodePeer | None, SyftError] = ( + remote_server_peer: Result[ServerPeer | None, 
SyftError] = ( self.stash.get_by_verify_key( - credentials=context.node.verify_key, + credentials=context.server.verify_key, verify_key=peer_verify_key, ) ) - if remote_node_peer.is_err(): - return SyftError(message=str(remote_node_peer.err())) - remote_node_peer = remote_node_peer.ok() - if remote_node_peer is None: + if remote_server_peer.is_err(): + return SyftError(message=str(remote_server_peer.err())) + remote_server_peer = remote_server_peer.ok() + if remote_server_peer is None: return SyftError( - message=f"Can't retrieve {remote_node_peer.name} from the store of peers (None)." + message=f"Can't retrieve {remote_server_peer.name} from the store of peers (None)." ) - return remote_node_peer + return remote_server_peer def _get_association_requests_by_peer_id( self, context: AuthedServiceContext, peer_id: UID @@ -869,7 +871,7 @@ def _get_association_requests_by_peer_id( """ Get all the association requests from a peer. The association requests are sorted by request_time. """ - request_get_all_method: Callable = context.node.get_service_method( + request_get_all_method: Callable = context.server.get_service_method( RequestService.get_all ) all_requests: list[Request] = request_get_all_method(context) @@ -888,11 +890,11 @@ def _get_association_requests_by_peer_id( ) -TYPE_TO_SERVICE[NodePeer] = NetworkService -SERVICE_TO_TYPES[NetworkService].update({NodePeer}) +TYPE_TO_SERVICE[ServerPeer] = NetworkService +SERVICE_TO_TYPES[NetworkService].update({ServerPeer}) -def from_grid_url(context: TransformContext) -> TransformContext: +def from_server_url(context: TransformContext) -> TransformContext: if context.obj is not None and context.output is not None: url = context.obj.url.as_container_host() context.output["host_or_ip"] = url.host_or_ip @@ -906,36 +908,38 @@ def from_grid_url(context: TransformContext) -> TransformContext: return context -@transform(HTTPConnection, HTTPNodeRoute) -def http_connection_to_node_route() -> list[Callable]: - return [from_grid_url] +@transform(HTTPConnection, HTTPServerRoute) +def http_connection_to_server_route() -> list[Callable]: + return [from_server_url] -def get_python_node_route(context: TransformContext) -> TransformContext: +def get_python_server_route(context: TransformContext) -> TransformContext: if context.output is not None and context.obj is not None: - context.output["id"] = context.obj.node.id - context.output["worker_settings"] = WorkerSettings.from_node(context.obj.node) + context.output["id"] = context.obj.server.id + context.output["worker_settings"] = WorkerSettings.from_server( + context.obj.server + ) context.output["proxy_target_uid"] = context.obj.proxy_target_uid return context -@transform(PythonConnection, PythonNodeRoute) -def python_connection_to_node_route() -> list[Callable]: - return [get_python_node_route] +@transform(PythonConnection, PythonServerRoute) +def python_connection_to_server_route() -> list[Callable]: + return [get_python_server_route] -@transform_method(PythonNodeRoute, PythonConnection) -def node_route_to_python_connection( +@transform_method(PythonServerRoute, PythonConnection) +def server_route_to_python_connection( obj: Any, context: TransformContext | None = None ) -> list[Callable]: - return PythonConnection(node=obj.node, proxy_target_uid=obj.proxy_target_uid) + return PythonConnection(server=obj.server, proxy_target_uid=obj.proxy_target_uid) -@transform_method(HTTPNodeRoute, HTTPConnection) -def node_route_to_http_connection( +@transform_method(HTTPServerRoute, HTTPConnection) +def 
server_route_to_http_connection( obj: Any, context: TransformContext | None = None ) -> list[Callable]: - url = GridURL( + url = ServerURL( protocol=obj.protocol, host_or_ip=obj.host_or_ip, port=obj.port ).as_container_host() return HTTPConnection( @@ -945,7 +949,7 @@ def node_route_to_http_connection( ) -@transform(NodeMetadata, NodePeer) +@transform(ServerMetadata, ServerPeer) def metadata_to_peer() -> list[Callable]: return [ keep( @@ -953,15 +957,15 @@ def metadata_to_peer() -> list[Callable]: "id", "name", "verify_key", - "node_type", + "server_type", ] ), make_set_default("admin_email", ""), ] -@transform(NodeSettings, NodePeer) +@transform(ServerSettings, ServerPeer) def settings_to_peer() -> list[Callable]: return [ - keep(["id", "name", "verify_key", "node_type", "admin_email"]), + keep(["id", "name", "verify_key", "server_type", "admin_email"]), ] diff --git a/packages/syft/src/syft/service/network/network_stash.py b/packages/syft/src/syft/service/network/network_stash.py index 2fa27f57056..3459584adcf 100644 --- a/packages/syft/src/syft/service/network/network_stash.py +++ b/packages/syft/src/syft/service/network/network_stash.py @@ -2,9 +2,9 @@ from result import Result # relative -from ...abstract_node import NodeType -from ...node.credentials import SyftVerifyKey +from ...abstract_server import ServerType from ...serde.serializable import serializable +from ...server.credentials import SyftVerifyKey from ...store.document_store import BaseUIDStoreStash from ...store.document_store import DocumentStore from ...store.document_store import PartitionKey @@ -13,20 +13,20 @@ from ...util.telemetry import instrument from ..data_subject.data_subject import NamePartitionKey from ..response import SyftError -from .node_peer import NodePeer -from .node_peer import NodePeerUpdate +from .server_peer import ServerPeer +from .server_peer import ServerPeerUpdate VerifyKeyPartitionKey = PartitionKey(key="verify_key", type_=SyftVerifyKey) -NodeTypePartitionKey = PartitionKey(key="node_type", type_=NodeType) +ServerTypePartitionKey = PartitionKey(key="server_type", type_=ServerType) OrderByNamePartitionKey = PartitionKey(key="name", type_=str) @instrument -@serializable() +@serializable(canonical_name="NetworkStash", version=1) class NetworkStash(BaseUIDStoreStash): - object_type = NodePeer + object_type = ServerPeer settings: PartitionSettings = PartitionSettings( - name=NodePeer.__canonical_name__, object_type=NodePeer + name=ServerPeer.__canonical_name__, object_type=ServerPeer ) def __init__(self, store: DocumentStore) -> None: @@ -34,46 +34,46 @@ def __init__(self, store: DocumentStore) -> None: def get_by_name( self, credentials: SyftVerifyKey, name: str - ) -> Result[NodePeer | None, str]: + ) -> Result[ServerPeer | None, str]: qks = QueryKeys(qks=[NamePartitionKey.with_obj(name)]) return self.query_one(credentials=credentials, qks=qks) def update( self, credentials: SyftVerifyKey, - peer_update: NodePeerUpdate, + peer_update: ServerPeerUpdate, has_permission: bool = False, - ) -> Result[NodePeer, str]: - valid = self.check_type(peer_update, NodePeerUpdate) + ) -> Result[ServerPeer, str]: + valid = self.check_type(peer_update, ServerPeerUpdate) if valid.is_err(): return SyftError(message=valid.err()) return super().update(credentials, peer_update, has_permission=has_permission) def create_or_update_peer( - self, credentials: SyftVerifyKey, peer: NodePeer - ) -> Result[NodePeer, str]: + self, credentials: SyftVerifyKey, peer: ServerPeer + ) -> Result[ServerPeer, str]: """ Update the 
selected peer and its route priorities if the peer already exists If the peer does not exist, simply adds it to the database. Args: credentials (SyftVerifyKey): The credentials used to authenticate the request. - peer (NodePeer): The peer to be updated or added. + peer (ServerPeer): The peer to be updated or added. Returns: - Result[NodePeer, str]: The updated or added peer if the operation + Result[ServerPeer, str]: The updated or added peer if the operation was successful, or an error message if the operation failed. """ - valid = self.check_type(peer, NodePeer) + valid = self.check_type(peer, ServerPeer) if valid.is_err(): return SyftError(message=valid.err()) existing = self.get_by_uid(credentials=credentials, uid=peer.id) if existing.is_ok() and existing.ok() is not None: - existing_peer: NodePeer = existing.ok() - existing_peer.update_routes(peer.node_routes) - peer_update = NodePeerUpdate( - id=peer.id, node_routes=existing_peer.node_routes + existing_peer: ServerPeer = existing.ok() + existing_peer.update_routes(peer.server_routes) + peer_update = ServerPeerUpdate( + id=peer.id, server_routes=existing_peer.server_routes ) result = self.update(credentials, peer_update) return result @@ -83,14 +83,14 @@ def create_or_update_peer( def get_by_verify_key( self, credentials: SyftVerifyKey, verify_key: SyftVerifyKey - ) -> Result[NodePeer | None, SyftError]: + ) -> Result[ServerPeer | None, SyftError]: qks = QueryKeys(qks=[VerifyKeyPartitionKey.with_obj(verify_key)]) return self.query_one(credentials, qks) - def get_by_node_type( - self, credentials: SyftVerifyKey, node_type: NodeType - ) -> Result[list[NodePeer], SyftError]: - qks = QueryKeys(qks=[NodeTypePartitionKey.with_obj(node_type)]) + def get_by_server_type( + self, credentials: SyftVerifyKey, server_type: ServerType + ) -> Result[list[ServerPeer], SyftError]: + qks = QueryKeys(qks=[ServerTypePartitionKey.with_obj(server_type)]) return self.query_all( credentials=credentials, qks=qks, order_by=OrderByNamePartitionKey ) diff --git a/packages/syft/src/syft/service/network/rathole.py b/packages/syft/src/syft/service/network/rathole.py index 4fd0be445b1..96abcd08af6 100644 --- a/packages/syft/src/syft/service/network/rathole.py +++ b/packages/syft/src/syft/service/network/rathole.py @@ -5,14 +5,14 @@ from ...serde.serializable import serializable from ...types.base import SyftBaseModel from ...util.util import get_env -from .node_peer import NodePeer +from .server_peer import ServerPeer def get_rathole_port() -> int: return int(get_env("RATHOLE_PORT", "2333")) -@serializable() +@serializable(canonical_name="RatholeConfig", version=1) class RatholeConfig(SyftBaseModel): uuid: str secret_token: str @@ -25,14 +25,14 @@ def local_address(self) -> str: return f"{self.local_addr_host}:{self.local_addr_port}" @classmethod - def from_peer(cls, peer: NodePeer) -> Self: + def from_peer(cls, peer: ServerPeer) -> Self: # relative - from .routes import HTTPNodeRoute + from .routes import HTTPServerRoute high_priority_route = peer.pick_highest_priority_route() - if not isinstance(high_priority_route, HTTPNodeRoute): - raise ValueError("Rathole only supports HTTPNodeRoute") + if not isinstance(high_priority_route, HTTPServerRoute): + raise ValueError("Rathole only supports HTTPServerRoute") return cls( uuid=peer.id, diff --git a/packages/syft/src/syft/service/network/rathole_config_builder.py b/packages/syft/src/syft/service/network/rathole_config_builder.py index 15d1e8fb7eb..fb0ef01d798 100644 --- 
a/packages/syft/src/syft/service/network/rathole_config_builder.py +++ b/packages/syft/src/syft/service/network/rathole_config_builder.py @@ -10,11 +10,11 @@ from ...custom_worker.k8s import KubeUtils from ...custom_worker.k8s import get_kr8s_client from ...types.uid import UID -from .node_peer import NodePeer from .rathole import RatholeConfig from .rathole import get_rathole_port from .rathole_toml import RatholeClientToml from .rathole_toml import RatholeServerToml +from .server_peer import ServerPeer RATHOLE_TOML_CONFIG_MAP = "rathole-config" RATHOLE_PROXY_CONFIG_MAP = "proxy-config-dynamic" @@ -26,11 +26,11 @@ class RatholeConfigBuilder: def __init__(self) -> None: self.k8rs_client = get_kr8s_client() - def add_host_to_server(self, peer: NodePeer) -> None: + def add_host_to_server(self, peer: ServerPeer) -> None: """Add a host to the rathole server toml file. Args: - peer (NodePeer): The peer to be added to the rathole server. + peer (ServerPeer): The peer to be added to the rathole server. Returns: None diff --git a/packages/syft/src/syft/service/network/reverse_tunnel_service.py b/packages/syft/src/syft/service/network/reverse_tunnel_service.py index 36d5b14e151..e155dd3f5b5 100644 --- a/packages/syft/src/syft/service/network/reverse_tunnel_service.py +++ b/packages/syft/src/syft/service/network/reverse_tunnel_service.py @@ -1,8 +1,8 @@ # relative -from ...types.grid_url import GridURL -from .node_peer import NodePeer +from ...types.server_url import ServerURL from .rathole_config_builder import RatholeConfigBuilder -from .routes import NodeRoute +from .routes import ServerRoute +from .server_peer import ServerPeer class ReverseTunnelService: @@ -11,38 +11,38 @@ def __init__(self) -> None: def set_client_config( self, - self_node_peer: NodePeer, - remote_node_route: NodeRoute, + self_server_peer: ServerPeer, + remote_server_route: ServerRoute, ) -> None: - rathole_route = self_node_peer.get_rtunnel_route() + rathole_route = self_server_peer.get_rtunnel_route() if not rathole_route: raise Exception( "Failed to exchange routes via . 
" - + f"Peer: {self_node_peer} has no rathole route: {rathole_route}" + + f"Peer: {self_server_peer} has no rathole route: {rathole_route}" ) - remote_url = GridURL( - host_or_ip=remote_node_route.host_or_ip, port=remote_node_route.port + remote_url = ServerURL( + host_or_ip=remote_server_route.host_or_ip, port=remote_server_route.port ) rathole_remote_addr = remote_url.as_container_host() remote_addr = rathole_remote_addr.url_no_protocol self.builder.add_host_to_client( - peer_name=self_node_peer.name, - peer_id=str(self_node_peer.id), + peer_name=self_server_peer.name, + peer_id=str(self_server_peer.id), rtunnel_token=rathole_route.rtunnel_token, remote_addr=remote_addr, ) - def set_server_config(self, remote_peer: NodePeer) -> None: + def set_server_config(self, remote_peer: ServerPeer) -> None: rathole_route = remote_peer.get_rtunnel_route() self.builder.add_host_to_server(remote_peer) if rathole_route else None - def clear_client_config(self, self_node_peer: NodePeer) -> None: - self.builder.remove_host_from_client(str(self_node_peer.id)) + def clear_client_config(self, self_server_peer: ServerPeer) -> None: + self.builder.remove_host_from_client(str(self_server_peer.id)) - def clear_server_config(self, remote_peer: NodePeer) -> None: + def clear_server_config(self, remote_peer: ServerPeer) -> None: self.builder.remove_host_from_server( str(remote_peer.id), server_name=remote_peer.name ) diff --git a/packages/syft/src/syft/service/network/routes.py b/packages/syft/src/syft/service/network/routes.py index 78ae53f0009..4f6ef2f4e0f 100644 --- a/packages/syft/src/syft/service/network/routes.py +++ b/packages/syft/src/syft/service/network/routes.py @@ -10,55 +10,57 @@ from typing_extensions import Self # relative -from ...abstract_node import AbstractNode +from ...abstract_server import AbstractServer from ...client.client import HTTPConnection -from ...client.client import NodeConnection from ...client.client import PythonConnection +from ...client.client import ServerConnection from ...client.client import SyftClient -from ...node.worker_settings import WorkerSettings from ...serde.serializable import serializable +from ...server.worker_settings import WorkerSettings from ...types.syft_object import SYFT_OBJECT_VERSION_1 -from ...types.syft_object import SYFT_OBJECT_VERSION_3 from ...types.syft_object import SyftObject from ...types.transforms import TransformContext from ...types.uid import UID from ..context import AuthedServiceContext -from ..context import NodeServiceContext +from ..context import ServerServiceContext from ..response import SyftError if TYPE_CHECKING: # relative - from .node_peer import NodePeer + from .server_peer import ServerPeer -class NodeRoute: +@serializable(canonical_name="ServerRoute", version=1) +class ServerRoute: def client_with_context( - self, context: NodeServiceContext + self, context: ServerServiceContext ) -> SyftClient | SyftError: """ Convert the current route (self) to a connection (either HTTP, Veilid or Python) and create a SyftClient from the connection. Args: - context (NodeServiceContext): The NodeServiceContext containing the node information. + context (ServerServiceContext): The ServerServiceContext containing the server information. Returns: SyftClient | SyftError: Returns the created SyftClient, or SyftError - if the client type is not valid or if the context's node is None. + if the client type is not valid or if the context's server is None. 
""" connection = route_to_connection(route=self, context=context) client_type = connection.get_client_type() if isinstance(client_type, SyftError): return client_type - return client_type(connection=connection, credentials=context.node.signing_key) + return client_type( + connection=connection, credentials=context.server.signing_key + ) def validate_with_context( self, context: AuthedServiceContext - ) -> NodePeer | SyftError: + ) -> ServerPeer | SyftError: # relative - from .node_peer import NodePeer + from .server_peer import ServerPeer - # Step 1: Check if the given route is able to reach the given node + # Step 1: Check if the given route is able to reach the given server # As we allow the user to give custom routes, we need to check the reachability of the route self_client = self.client_with_context(context=context) @@ -73,23 +75,23 @@ def validate_with_context( try: # Verifying if the challenge is valid - context.node.verify_key.verify_key.verify( + context.server.verify_key.verify_key.verify( random_challenge, challenge_signature ) except Exception: return SyftError(message="Signature Verification Failed in ping") - # Step 2: Create a Node Peer with the given route - self_node_peer: NodePeer = context.node.settings.to(NodePeer) - self_node_peer.node_routes.append(self) + # Step 2: Create a Server Peer with the given route + self_server_peer: ServerPeer = context.server.settings.to(ServerPeer) + self_server_peer.server_routes.append(self) - return self_node_peer + return self_server_peer @serializable() -class HTTPNodeRoute(SyftObject, NodeRoute): - __canonical_name__ = "HTTPNodeRoute" - __version__ = SYFT_OBJECT_VERSION_3 +class HTTPServerRoute(SyftObject, ServerRoute): + __canonical_name__ = "HTTPServerRoute" + __version__ = SYFT_OBJECT_VERSION_1 id: UID | None = None # type: ignore host_or_ip: str @@ -101,7 +103,7 @@ class HTTPNodeRoute(SyftObject, NodeRoute): rtunnel_token: str | None = None def __eq__(self, other: Any) -> bool: - if not isinstance(other, HTTPNodeRoute): + if not isinstance(other, HTTPServerRoute): return False return hash(self) == hash(other) @@ -119,9 +121,9 @@ def __str__(self) -> str: @serializable() -class PythonNodeRoute(SyftObject, NodeRoute): - __canonical_name__ = "PythonNodeRoute" - __version__ = SYFT_OBJECT_VERSION_3 +class PythonServerRoute(SyftObject, ServerRoute): + __canonical_name__ = "PythonServerRoute" + __version__ = SYFT_OBJECT_VERSION_1 id: UID | None = None # type: ignore worker_settings: WorkerSettings @@ -129,29 +131,29 @@ class PythonNodeRoute(SyftObject, NodeRoute): priority: int = 1 @property - def node(self) -> AbstractNode | None: + def server(self) -> AbstractServer | None: # relative - from ...node.worker import Worker + from ...server.worker import Worker - node = Worker( + server = Worker( id=self.worker_settings.id, name=self.worker_settings.name, - node_type=self.worker_settings.node_type, - node_side_type=self.worker_settings.node_side_type, + server_type=self.worker_settings.server_type, + server_side_type=self.worker_settings.server_side_type, signing_key=self.worker_settings.signing_key, document_store_config=self.worker_settings.document_store_config, action_store_config=self.worker_settings.action_store_config, processes=1, ) - return node + return server @classmethod - def with_node(cls, node: AbstractNode) -> Self: - worker_settings = WorkerSettings.from_node(node) + def with_server(cls, server: AbstractServer) -> Self: + worker_settings = WorkerSettings.from_server(server) return cls(id=worker_settings.id, 
worker_settings=worker_settings) def __eq__(self, other: Any) -> bool: - if not isinstance(other, PythonNodeRoute): + if not isinstance(other, PythonServerRoute): return False return hash(self) == hash(other) @@ -159,18 +161,18 @@ def __hash__(self) -> int: return ( hash(self.worker_settings.id) + hash(self.worker_settings.name) - + hash(self.worker_settings.node_type) - + hash(self.worker_settings.node_side_type) + + hash(self.worker_settings.server_type) + + hash(self.worker_settings.server_side_type) + hash(self.worker_settings.signing_key) ) def __str__(self) -> str: - return "PythonNodeRoute" + return "PythonServerRoute" @serializable() -class VeilidNodeRoute(SyftObject, NodeRoute): - __canonical_name__ = "VeilidNodeRoute" +class VeilidServerRoute(SyftObject, ServerRoute): + __canonical_name__ = "VeilidServerRoute" __version__ = SYFT_OBJECT_VERSION_1 vld_key: str @@ -178,7 +180,7 @@ class VeilidNodeRoute(SyftObject, NodeRoute): priority: int = 1 def __eq__(self, other: Any) -> bool: - if not isinstance(other, VeilidNodeRoute): + if not isinstance(other, VeilidServerRoute): return False return hash(self) == hash(other) @@ -186,25 +188,25 @@ def __hash__(self) -> int: return hash(self.vld_key) + hash(self.proxy_target_uid) -NodeRouteTypeV1 = HTTPNodeRoute | PythonNodeRoute | VeilidNodeRoute -NodeRouteType = HTTPNodeRoute | PythonNodeRoute +ServerRouteTypeV1 = HTTPServerRoute | PythonServerRoute | VeilidServerRoute +ServerRouteType = HTTPServerRoute | PythonServerRoute def route_to_connection( - route: NodeRoute, context: TransformContext | None = None -) -> NodeConnection: - if isinstance(route, HTTPNodeRoute): + route: ServerRoute, context: TransformContext | None = None +) -> ServerConnection: + if isinstance(route, HTTPServerRoute): return route.to(HTTPConnection, context=context) - elif isinstance(route, PythonNodeRoute): + elif isinstance(route, PythonServerRoute): return route.to(PythonConnection, context=context) else: raise ValueError(f"Route {route} is not supported.") -def connection_to_route(connection: NodeConnection) -> NodeRoute: +def connection_to_route(connection: ServerConnection) -> ServerRoute: if isinstance(connection, HTTPConnection): - return connection.to(HTTPNodeRoute) + return connection.to(HTTPServerRoute) elif isinstance(connection, PythonConnection): # type: ignore[unreachable] - return connection.to(PythonNodeRoute) + return connection.to(PythonServerRoute) else: raise ValueError(f"Connection {connection} is not supported.") diff --git a/packages/syft/src/syft/service/network/node_peer.py b/packages/syft/src/syft/service/network/server_peer.py similarity index 56% rename from packages/syft/src/syft/service/network/node_peer.py rename to packages/syft/src/syft/service/network/server_peer.py index 23c1ffc7057..941396820d5 100644 --- a/packages/syft/src/syft/service/network/node_peer.py +++ b/packages/syft/src/syft/service/network/server_peer.py @@ -9,72 +9,50 @@ from result import Result # relative -from ...abstract_node import NodeType -from ...client.client import NodeConnection +from ...abstract_server import ServerType +from ...client.client import ServerConnection from ...client.client import SyftClient -from ...node.credentials import SyftSigningKey -from ...node.credentials import SyftVerifyKey from ...serde.serializable import serializable +from ...server.credentials import SyftSigningKey +from ...server.credentials import SyftVerifyKey from ...service.response import SyftError from ...types.datetime import DateTime -from ...types.syft_migration import 
migrate from ...types.syft_object import PartialSyftObject from ...types.syft_object import SYFT_OBJECT_VERSION_1 -from ...types.syft_object import SYFT_OBJECT_VERSION_2 -from ...types.syft_object import SYFT_OBJECT_VERSION_3 from ...types.syft_object import SyftObject from ...types.transforms import TransformContext from ...types.uid import UID -from ..context import NodeServiceContext -from ..metadata.node_metadata import NodeMetadata -from .routes import HTTPNodeRoute -from .routes import NodeRoute -from .routes import NodeRouteType -from .routes import NodeRouteTypeV1 -from .routes import PythonNodeRoute -from .routes import VeilidNodeRoute +from ..context import ServerServiceContext +from ..metadata.server_metadata import ServerMetadata +from .routes import HTTPServerRoute +from .routes import PythonServerRoute +from .routes import ServerRoute +from .routes import ServerRouteType +from .routes import VeilidServerRoute from .routes import connection_to_route from .routes import route_to_connection logger = logging.getLogger(__name__) -@serializable() -class NodePeerConnectionStatus(Enum): +@serializable(canonical_name="ServerPeerConnectionStatus", version=1) +class ServerPeerConnectionStatus(Enum): ACTIVE = "ACTIVE" INACTIVE = "INACTIVE" TIMEOUT = "TIMEOUT" @serializable() -class NodePeerV2(SyftObject): - # version - __canonical_name__ = "NodePeer" - __version__ = SYFT_OBJECT_VERSION_2 - - __attr_searchable__ = ["name", "node_type"] - __attr_unique__ = ["verify_key"] - __repr_attrs__ = ["name", "node_type", "admin_email"] - - id: UID | None = None # type: ignore[assignment] - name: str - verify_key: SyftVerifyKey - node_routes: list[NodeRouteTypeV1] = [] - node_type: NodeType - admin_email: str - - -@serializable() -class NodePeer(SyftObject): +class ServerPeer(SyftObject): # version - __canonical_name__ = "NodePeer" - __version__ = SYFT_OBJECT_VERSION_3 + __canonical_name__ = "ServerPeer" + __version__ = SYFT_OBJECT_VERSION_1 - __attr_searchable__ = ["name", "node_type"] + __attr_searchable__ = ["name", "server_type"] __attr_unique__ = ["verify_key"] __repr_attrs__ = [ "name", - "node_type", + "server_type", "admin_email", "ping_status", "ping_status_message", @@ -84,51 +62,53 @@ class NodePeer(SyftObject): id: UID | None = None # type: ignore[assignment] name: str verify_key: SyftVerifyKey - node_routes: list[NodeRouteType] = [] - node_type: NodeType + server_routes: list[ServerRouteType] = [] + server_type: ServerType admin_email: str - ping_status: NodePeerConnectionStatus | None = None + ping_status: ServerPeerConnectionStatus | None = None ping_status_message: str | None = None pinged_timestamp: DateTime | None = None - def existed_route(self, route: NodeRouteType) -> tuple[bool, int | None]: - """Check if a route exists in self.node_routes + def existed_route(self, route: ServerRouteType) -> tuple[bool, int | None]: + """Check if a route exists in self.server_routes Args: route: the route to be checked. 
For now it can be either - HTTPNodeRoute or PythonNodeRoute + HTTPServerRoute or PythonServerRoute Returns: - if the route exists, returns (True, index of the existed route in self.node_routes) + if the route exists, returns (True, index of the existed route in self.server_routes) if the route does not exist returns (False, None) """ if route: - if not isinstance(route, HTTPNodeRoute | PythonNodeRoute | VeilidNodeRoute): + if not isinstance( + route, HTTPServerRoute | PythonServerRoute | VeilidServerRoute + ): raise ValueError(f"Unsupported route type: {type(route)}") - for i, r in enumerate(self.node_routes): + for i, r in enumerate(self.server_routes): if route == r: return (True, i) return (False, None) - def update_route_priority(self, route: NodeRoute) -> NodeRoute: + def update_route_priority(self, route: ServerRoute) -> ServerRoute: """ Assign the new_route's priority to be current max + 1 Args: - route (NodeRoute): The new route whose priority is to be updated. + route (ServerRoute): The new route whose priority is to be updated. Returns: - NodeRoute: The new route with the updated priority + ServerRoute: The new route with the updated priority """ - current_max_priority: int = max(route.priority for route in self.node_routes) + current_max_priority: int = max(route.priority for route in self.server_routes) route.priority = current_max_priority + 1 return route - def pick_highest_priority_route(self, oldest: bool = True) -> NodeRoute: + def pick_highest_priority_route(self, oldest: bool = True) -> ServerRoute: """ - Picks the route with the highest priority from the list of node routes. + Picks the route with the highest priority from the list of server routes. Args: oldest (bool): @@ -138,11 +118,11 @@ def pick_highest_priority_route(self, oldest: bool = True) -> NodeRoute: meaning the route with max priority value. Returns: - NodeRoute: The route with the highest priority. + ServerRoute: The route with the highest priority. """ - highest_priority_route: NodeRoute = self.node_routes[-1] - for route in self.node_routes[:-1]: + highest_priority_route: ServerRoute = self.server_routes[-1] + for route in self.server_routes[:-1]: if oldest: if route.priority < highest_priority_route.priority: highest_priority_route = route @@ -151,34 +131,34 @@ def pick_highest_priority_route(self, oldest: bool = True) -> NodeRoute: highest_priority_route = route return highest_priority_route - def update_route(self, route: NodeRoute) -> None: + def update_route(self, route: ServerRoute) -> None: """ - Update the route for the node. + Update the route for the server. If the route already exists, return it. If the route is new, assign it to have the priority of (current_max + 1) Args: - route (NodeRoute): The new route to be added to the peer. + route (ServerRoute): The new route to be added to the peer. """ existed, idx = self.existed_route(route) if existed: - self.node_routes[idx] = route # type: ignore + self.server_routes[idx] = route # type: ignore else: new_route = self.update_route_priority(route) - self.node_routes.append(new_route) + self.server_routes.append(new_route) - def update_routes(self, new_routes: list[NodeRoute]) -> None: + def update_routes(self, new_routes: list[ServerRoute]) -> None: """ - Update multiple routes of the node peer. + Update multiple routes of the server peer. This method takes a list of new routes as input. It first updates the priorities of the new routes. - Then, for each new route, it checks if the route already exists for the node peer. 
+ Then, for each new route, it checks if the route already exists for the server peer. If it does, it updates the priority of the existing route. - If it doesn't, it adds the new route to the node. + If it doesn't, it adds the new route to the server. Args: - new_routes (list[NodeRoute]): The new routes to be added to the node. + new_routes (list[ServerRoute]): The new routes to be added to the server. Returns: None @@ -187,18 +167,18 @@ def update_routes(self, new_routes: list[NodeRoute]) -> None: self.update_route(new_route) def update_existed_route_priority( - self, route: NodeRoute, priority: int | None = None - ) -> NodeRouteType | SyftError: + self, route: ServerRoute, priority: int | None = None + ) -> ServerRouteType | SyftError: """ Update the priority of an existed route. Args: - route (NodeRoute): The route whose priority is to be updated. + route (ServerRoute): The route whose priority is to be updated. priority (int | None): The new priority of the route. If not given, the route will be assigned with the highest priority. Returns: - NodeRoute: The route with updated priority if the route exists + ServerRoute: The route with updated priority if the route exists SyftError: If the route does not exist or the priority is invalid """ if priority is not None and priority <= 0: @@ -212,63 +192,61 @@ def update_existed_route_priority( return SyftError(message=f"Route with id {route.id} does not exist.") if priority is not None: - self.node_routes[index].priority = priority + self.server_routes[index].priority = priority else: - self.node_routes[index].priority = self.update_route_priority( + self.server_routes[index].priority = self.update_route_priority( route ).priority - return self.node_routes[index] + return self.server_routes[index] @staticmethod - def from_client(client: SyftClient) -> "NodePeer": + def from_client(client: SyftClient) -> "ServerPeer": if not client.metadata: raise ValueError("Client has to have metadata first") - peer = client.metadata.to(NodeMetadata).to(NodePeer) + peer = client.metadata.to(ServerMetadata).to(ServerPeer) route = connection_to_route(client.connection) - peer.node_routes.append(route) + peer.server_routes.append(route) return peer @property - def latest_added_route(self) -> NodeRoute | None: + def latest_added_route(self) -> ServerRoute | None: """ - Returns the latest added route from the list of node routes. + Returns the latest added route from the list of server routes. Returns: - NodeRoute | None: The latest added route, or None if there are no routes. + ServerRoute | None: The latest added route, or None if there are no routes. 
""" - return self.node_routes[-1] if self.node_routes else None + return self.server_routes[-1] if self.server_routes else None def client_with_context( - self, context: NodeServiceContext + self, context: ServerServiceContext ) -> Result[type[SyftClient], str]: # third party - if len(self.node_routes) < 1: + if len(self.server_routes) < 1: raise ValueError(f"No routes to peer: {self}") # select the route with highest priority to connect to the peer - final_route: NodeRoute = self.pick_highest_priority_route() - connection: NodeConnection = route_to_connection(route=final_route) + final_route: ServerRoute = self.pick_highest_priority_route() + connection: ServerConnection = route_to_connection(route=final_route) try: client_type = connection.get_client_type() except Exception as e: - msg = ( - f"Failed to establish a connection with {self.node_type} '{self.name}'" - ) + msg = f"Failed to establish a connection with {self.server_type} '{self.name}'" logger.error(msg, exc_info=e) return Err(msg) if isinstance(client_type, SyftError): return Err(client_type.message) return Ok( - client_type(connection=connection, credentials=context.node.signing_key) + client_type(connection=connection, credentials=context.server.signing_key) ) def client_with_key(self, credentials: SyftSigningKey) -> SyftClient | SyftError: - if len(self.node_routes) < 1: + if len(self.server_routes) < 1: raise ValueError(f"No routes to peer: {self}") - final_route: NodeRoute = self.pick_highest_priority_route() + final_route: ServerRoute = self.pick_highest_priority_route() connection = route_to_connection(route=final_route) client_type = connection.get_client_type() @@ -285,26 +263,26 @@ def guest_client(self) -> SyftClient: def proxy_from(self, client: SyftClient) -> SyftClient: return client.proxy_to(self) - def get_rtunnel_route(self) -> HTTPNodeRoute | None: - for route in self.node_routes: + def get_rtunnel_route(self) -> HTTPServerRoute | None: + for route in self.server_routes: if hasattr(route, "rtunnel_token") and route.rtunnel_token: return route return None - def delete_route(self, route: NodeRouteType) -> SyftError | None: + def delete_route(self, route: ServerRouteType) -> SyftError | None: """ Deletes a route from the peer's route list. - Takes O(n) where is n is the number of routes in self.node_routes. + Takes O(n) where is n is the number of routes in self.server_routes. Args: - route (NodeRouteType): The route to be deleted; + route (ServerRouteType): The route to be deleted; Returns: - SyftError: If failing to delete node route + SyftError: If failing to delete server route """ if route: try: - self.node_routes = [r for r in self.node_routes if r != route] + self.server_routes = [r for r in self.server_routes if r != route] except Exception as e: return SyftError( message=f"Error deleting route with id {route.id}. 
Exception: {e}" @@ -314,15 +292,15 @@ def delete_route(self, route: NodeRouteType) -> SyftError | None: @serializable() -class NodePeerUpdate(PartialSyftObject): - __canonical_name__ = "NodePeerUpdate" +class ServerPeerUpdate(PartialSyftObject): + __canonical_name__ = "ServerPeerUpdate" __version__ = SYFT_OBJECT_VERSION_1 id: UID name: str - node_routes: list[NodeRouteType] + server_routes: list[ServerRouteType] admin_email: str - ping_status: NodePeerConnectionStatus + ping_status: ServerPeerConnectionStatus ping_status_message: str pinged_timestamp: DateTime @@ -330,23 +308,13 @@ class NodePeerUpdate(PartialSyftObject): def drop_veilid_route() -> Callable: def _drop_veilid_route(context: TransformContext) -> TransformContext: if context.output: - node_routes = context.output["node_routes"] + server_routes = context.output["server_routes"] new_routes = [ - node_route - for node_route in node_routes - if not isinstance(node_route, VeilidNodeRoute) + server_route + for server_route in server_routes + if not isinstance(server_route, VeilidServerRoute) ] - context.output["node_routes"] = new_routes + context.output["server_routes"] = new_routes return context return _drop_veilid_route - - -@migrate(NodePeerV2, NodePeer) -def upgrade_node_peer() -> list[Callable]: - return [drop_veilid_route()] - - -@migrate(NodePeerV2, NodePeer) -def downgrade_node_peer() -> list[Callable]: - return [] diff --git a/packages/syft/src/syft/service/network/utils.py b/packages/syft/src/syft/service/network/utils.py index 476411bc6e6..af16cc53c02 100644 --- a/packages/syft/src/syft/service/network/utils.py +++ b/packages/syft/src/syft/service/network/utils.py @@ -1,24 +1,36 @@ # stdlib +from enum import Enum +import itertools import logging import threading import time from typing import cast +# third party +from rich.box import DOUBLE_EDGE +from rich.console import Console +from rich.table import Table +from rich.text import Text + # relative +from ...client.client import SyftClient from ...serde.serializable import serializable from ...types.datetime import DateTime from ..context import AuthedServiceContext +from ..request.request import Request from ..response import SyftError +from ..response import SyftSuccess +from ..user.user_roles import ServiceRole from .network_service import NetworkService -from .network_service import NodePeerAssociationStatus -from .node_peer import NodePeer -from .node_peer import NodePeerConnectionStatus -from .node_peer import NodePeerUpdate +from .network_service import ServerPeerAssociationStatus +from .server_peer import ServerPeer +from .server_peer import ServerPeerConnectionStatus +from .server_peer import ServerPeerUpdate logger = logging.getLogger(__name__) -@serializable(without=["thread"]) +@serializable(without=["thread"], canonical_name="PeerHealthCheckTask", version=1) class PeerHealthCheckTask: repeat_time = 10 # in seconds @@ -40,19 +52,21 @@ def peer_route_heathcheck(self, context: AuthedServiceContext) -> SyftError | No None """ - network_service = cast(NetworkService, context.node.get_service(NetworkService)) + network_service = cast( + NetworkService, context.server.get_service(NetworkService) + ) network_stash = network_service.stash - result = network_stash.get_all(context.node.verify_key) + result = network_stash.get_all(context.server.verify_key) if result.is_err(): logger.error(f"Failed to fetch peers from stash: {result.err()}") return SyftError(message=f"{result.err()}") - all_peers: list[NodePeer] = result.ok() + all_peers: list[ServerPeer] = result.ok() 
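As a quick orientation to the renamed API, here is a minimal, illustrative sketch of driving the ServerPeer route helpers defined in server_peer.py above. The worker name, host, and port are invented, and the in-memory Worker pattern mirrors the test fixtures that appear later in this diff.

```python
# Illustrative only: exercises ServerPeer / HTTPServerRoute from the files above.
# The worker name, host, and port are made-up values.
import syft as sy
from syft.service.network.routes import HTTPServerRoute
from syft.service.network.server_peer import ServerPeer

worker = sy.Worker.named(name="route-demo")        # throwaway in-memory server
peer = ServerPeer.from_client(worker.root_client)  # metadata plus one Python route

# update_route() gives a brand-new route the priority current_max + 1
peer.update_route(HTTPServerRoute(host_or_ip="localhost", port=8080, protocol="http"))

# oldest=False returns the route with the largest priority value,
# i.e. the HTTP route appended above
route = peer.pick_highest_priority_route(oldest=False)
print(type(route).__name__, route.priority)

worker.cleanup()
```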
for peer in all_peers: - peer_update = NodePeerUpdate(id=peer.id) + peer_update = ServerPeerUpdate(id=peer.id) peer_update.pinged_timestamp = DateTime.now() try: peer_client = peer.client_with_context(context=context) @@ -60,23 +74,23 @@ def peer_route_heathcheck(self, context: AuthedServiceContext) -> SyftError | No logger.error( f"Failed to create client for peer: {peer}: {peer_client.err()}" ) - peer_update.ping_status = NodePeerConnectionStatus.TIMEOUT + peer_update.ping_status = ServerPeerConnectionStatus.TIMEOUT peer_client = None except Exception as e: logger.error(f"Failed to create client for peer: {peer}", exc_info=e) - peer_update.ping_status = NodePeerConnectionStatus.TIMEOUT + peer_update.ping_status = ServerPeerConnectionStatus.TIMEOUT peer_client = None if peer_client is not None: peer_client = peer_client.ok() peer_status = peer_client.api.services.network.check_peer_association( - peer_id=context.node.id + peer_id=context.server.id ) peer_update.ping_status = ( - NodePeerConnectionStatus.ACTIVE - if peer_status == NodePeerAssociationStatus.PEER_ASSOCIATED - else NodePeerConnectionStatus.INACTIVE + ServerPeerConnectionStatus.ACTIVE + if peer_status == ServerPeerAssociationStatus.PEER_ASSOCIATED + else ServerPeerConnectionStatus.INACTIVE ) if isinstance(peer_status, SyftError): peer_update.ping_status_message = ( @@ -89,7 +103,7 @@ def peer_route_heathcheck(self, context: AuthedServiceContext) -> SyftError | No ) result = network_stash.update( - credentials=context.node.verify_key, + credentials=context.server.verify_key, peer_update=peer_update, has_permission=True, ) @@ -128,3 +142,121 @@ def stop(self) -> None: self.thread = None self.started_time = None logger.info("Peer health check task stopped.") + + +def exchange_routes(clients: list[SyftClient], auto_approve: bool = False) -> None: + """Exchange routes between a list of clients.""" + # Rich Table + console = Console() + + table = Table( + show_header=True, header_style="bold magenta", box=DOUBLE_EDGE, pad_edge=False + ) + table.add_column("Connect To", style="black", width=25, overflow="fold") + table.add_column("Connect From", style="black", width=25, overflow="fold") + table.add_column("Status", style="black", width=25, overflow="fold") + + if auto_approve: + # Check that all clients are admin clients + for client in clients: + if not client.user_role == ServiceRole.ADMIN: + return SyftError( + message=f"Client {client} is not an admin client. " + "Only admin clients can auto-approve connection requests." 
+ ) + + for client1, client2 in itertools.combinations(clients, 2): + peer1 = ServerPeer.from_client(client1) + peer2 = ServerPeer.from_client(client2) + + client1_connection_request = client1.api.services.network.add_peer(peer2) + if isinstance(client1_connection_request, SyftError): + return SyftError( + message=f"Failed to add peer {peer2} to {client1}: {client1_connection_request}" + ) + + client2_connection_request = client2.api.services.network.add_peer(peer1) + if isinstance(client2_connection_request, SyftError): + return SyftError( + message=f"Failed to add peer {peer1} to {client2}: {client2_connection_request}" + ) + + if auto_approve: + if isinstance(client1_connection_request, Request): + res1 = client1_connection_request.approve() + if isinstance(res1, SyftError): + return SyftError( + message=f"Failed to approve connection request between {client1} and {client2}: {res1}" + ) + if isinstance(client2_connection_request, Request): + res2 = client2_connection_request.approve() + if isinstance(res2, SyftError): + return SyftError( + message=f"Failed to approve connection request between {client2} and {client1}: {res2}" + ) + table.add_row( + Text(f"{client1.name}-{client1.id.short()}", no_wrap=False), + f"{client2.name}-{client2.id.short()}", + "Connected ✅", + ) + table.add_row( + f"{client2.name}-{client2.id.short()}", + f"{client1.name}-{client1.id.short()}", + "Connected ✅", + ) + else: + client1_res = ( + "Connected ✅" + if isinstance(client1_connection_request, SyftSuccess) + else "Request Sent 📨" + ) + client2_res = ( + "Connected ✅" + if isinstance(client2_connection_request, SyftSuccess) + else "Request Sent 📨" + ) + table.add_row( + Text(f"{client1.name}-{client1.id.short()}", no_wrap=False), + Text(f"{client2.name}-{client2.id.short()}", no_wrap=False), + client1_res, + ) + table.add_row( + Text(f"{client2.name}-{client2.id.short()}", no_wrap=False), + Text(f"{client1.name}-{client1.id.short()}", no_wrap=False), + client2_res, + ) + + console.print(table) + + +class NetworkTopology(Enum): + STAR = "STAR" + MESH = "MESH" + HYBRID = "HYBRID" + + +def check_route_reachability( + clients: list[SyftClient], topology: NetworkTopology = NetworkTopology.MESH +) -> SyftSuccess | SyftError: + if topology == NetworkTopology.STAR: + return SyftError(message="STAR topology is not supported yet") + elif topology == NetworkTopology.MESH: + return check_mesh_topology(clients) + else: + return SyftError(message=f"Invalid topology: {topology}") + + +def check_mesh_topology(clients: list[SyftClient]) -> SyftSuccess | SyftError: + for client in clients: + for other_client in clients: + if client == other_client: + continue + result = client.api.services.network.ping_peer( + verify_key=other_client.root_verify_key + ) + if isinstance(result, SyftError): + return SyftError( + message=f"{client.name}-<{client.id}> - cannot reach" + + f"{other_client.name}-<{other_client.id} - {result.message}" + ) + return SyftSuccess(message="All clients are reachable") diff --git a/packages/syft/src/syft/service/notification/email_templates.py b/packages/syft/src/syft/service/notification/email_templates.py index 5d32025b1fd..f8baceee38a 100644 --- a/packages/syft/src/syft/service/notification/email_templates.py +++ b/packages/syft/src/syft/service/notification/email_templates.py @@ -3,6 +3,7 @@ from typing import cast # relative +from ...serde.serializable import serializable from ...store.linked_obj import LinkedObject from ..context import AuthedServiceContext @@ -21,14 +22,15 @@ def 
email_body(notification: "Notification", context: AuthedServiceContext) -> s return "" +@serializable(canonical_name="OnboardEmailTemplate", version=1) class OnBoardEmailTemplate(EmailTemplate): @staticmethod def email_title(notification: "Notification", context: AuthedServiceContext) -> str: - return f"Welcome to {context.node.name} node!" + return f"Welcome to {context.server.name} server!" @staticmethod def email_body(notification: "Notification", context: AuthedServiceContext) -> str: - user_service = context.node.get_service("userservice") + user_service = context.server.get_service("userservice") admin_name = user_service.get_by_verify_key( user_service.admin_verify_key() ).name @@ -36,7 +38,7 @@ def email_body(notification: "Notification", context: AuthedServiceContext) -> s head = ( f""" - Welcome to {context.node.name} + Welcome to {context.server.name} """ + """
-            Logo
-            Welcome to $domain_name
+            Logo
+            Welcome to $datasite_name
- URL: $node_url
- Node Description: $description
- Node Type: $node_type
- Node Side Type:$node_side_type
- Syft Version: $node_version
+ URL: $server_url
+ Server Description: $description
+ Server Type: $server_type
+ Server Side Type: $server_side_type
+ Syft Version: $server_version
ⓘ
- This domain is run by the library PySyft to learn more about how it works visit
+ This datasite is run by the library PySyft to learn more about how it works visit
github.com/OpenMined/PySyft.

Commands to Get Started

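The exchange_routes and check_route_reachability helpers introduced in service/network/utils.py above lend themselves to a short usage sketch. This is illustrative only: the server names are invented, auto_approve requires admin clients, and whether two in-memory workers can actually reach each other depends on the route type they end up exchanging.

```python
# Illustrative only: pair up two throwaway servers with the new helpers
# from syft.service.network.utils. Names are made up.
import syft as sy
from syft.service.network.utils import check_route_reachability, exchange_routes

alpha = sy.Worker.named(name="alpha")
beta = sy.Worker.named(name="beta")
clients = [alpha.root_client, beta.root_client]

# Each pair of clients registers the other as a ServerPeer; with auto_approve=True
# the association requests are approved on the spot and a Rich summary table is printed.
exchange_routes(clients, auto_approve=True)

# MESH topology: every client must be able to ping every other client.
print(check_route_reachability(clients))

alpha.cleanup()
beta.cleanup()
```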
diff --git a/packages/syft/src/syft/util/table.py b/packages/syft/src/syft/util/table.py index 611ca5e33a2..8bfd88a4f58 100644 --- a/packages/syft/src/syft/util/table.py +++ b/packages/syft/src/syft/util/table.py @@ -18,7 +18,12 @@ def _syft_in_mro(self: Any, item: Any) -> bool: if hasattr(type(item), "mro") and type(item) != type: - mro = type(item).mro() + # if unbound method, supply self + if hasattr(type(item).mro, "__self__"): + mro = type(item).mro() + else: + mro = type(item).mro(type(item)) # type: ignore + elif hasattr(item, "mro") and type(item) != type: mro = item.mro() else: @@ -55,6 +60,22 @@ def _create_table_rows( extra_fields: list | None = None, add_index: bool = True, ) -> list[dict[str, Any]]: + """ + Creates row data for a table based on input object obj. + + If valid table data cannot be created, an empty list is returned. + + Args: + _self (Mapping | Iterable): The input data as a Mapping or Iterable. + is_homogenous (bool): A boolean indicating whether the data is homogenous. + extra_fields (list | None, optional): Additional fields to include in the table. Defaults to None. + add_index (bool, optional): Whether to add an index column. Defaults to True. + + Returns: + list[dict[str, Any]]: A list of dictionaries where each dictionary represents a row in the table. + + """ + if extra_fields is None: extra_fields = [] @@ -139,9 +160,8 @@ def _create_table_rows( col_lengths = {len(cols[col]) for col in cols.keys()} if len(col_lengths) != 1: - raise ValueError( - "Cannot create table for items with different number of fields." - ) + logger.debug("Cannot create table for items with different number of fields.") + return [] num_rows = col_lengths.pop() if add_index and TABLE_INDEX_KEY not in cols: @@ -194,19 +214,29 @@ def prepare_table_data( add_index: bool = True, ) -> tuple[list[dict], dict]: """ - Returns table_data, table_metadata + Creates table data and metadata for a given object. + + If a tabular representation cannot be created, an empty list and empty dict are returned instead. + + Args: + obj (Any): The input object for which table data is prepared. + add_index (bool, optional): Whether to add an index column to the table. Defaults to True. + + Returns: + tuple: A tuple (table_data, table_metadata) where table_data is a list of dictionaries + where each dictionary represents a row in the table and table_metadata is a dictionary + containing metadata about the table such as name, icon, etc. - table_data is a list of dictionaries where each dictionary represents a row in the table. - table_metadata is a dictionary containing metadata about the table such as name, icon, etc. """ values = _get_values_for_table_repr(obj) if len(values) == 0: return [], {} + # check first value and obj itself to see if syft in mro. 
If not, don't create table first_value = values[0] if not _syft_in_mro(obj, first_value): - raise ValueError("Cannot create table for Non-syft objects.") + return [], {} extra_fields = getattr(first_value, "__repr_attrs__", []) is_homogenous = len({type(x) for x in values}) == 1 @@ -228,6 +258,10 @@ def prepare_table_data( extra_fields=extra_fields, add_index=add_index, ) + # if empty result, collection objects have no table representation + if not table_data: + return [], {} + table_data = _sort_table_rows(table_data, sort_key) table_metadata = { diff --git a/packages/syft/src/syft/util/update_commit.py b/packages/syft/src/syft/util/update_commit.py new file mode 100644 index 00000000000..359120c37c2 --- /dev/null +++ b/packages/syft/src/syft/util/update_commit.py @@ -0,0 +1,48 @@ +# stdlib +import os +import subprocess # nosec +import sys + + +def get_commit_hash() -> str: + cwd = os.path.dirname(os.path.abspath(__file__)) + try: + output = subprocess.check_output( + "git rev-parse --short HEAD".split(" "), + cwd=cwd, # nosec + ) + return output.strip().decode("ascii") + except subprocess.CalledProcessError as e: + print(f"Error getting commit hash: {e}") + sys.exit(1) + + +def update_commit_variable(file_path: str, commit_hash: str) -> None: + """Replace the __commit__ variable with the actual commit hash.""" + try: + with open(file_path) as file: + lines = file.readlines() + + with open(file_path, "w") as file: + updated = False + for line in lines: + if "__commit__ = " in line: + file.write(f'__commit__ = "{commit_hash}"\n') + updated = True + else: + file.write(line) + if not updated: + print("No __commit__ variable found in the file.") + except OSError as e: + print(f"Error reading or writing file: {e}") + sys.exit(1) + + +if __name__ == "__main__": + if len(sys.argv) != 2: + print("Usage: python update_commit.py ") + sys.exit(1) + + file_path = sys.argv[1] + commit_hash = get_commit_hash() + update_commit_variable(file_path, commit_hash) diff --git a/packages/syft/tests/conftest.py b/packages/syft/tests/conftest.py index 2d781f817d7..7deec7e632c 100644 --- a/packages/syft/tests/conftest.py +++ b/packages/syft/tests/conftest.py @@ -10,16 +10,18 @@ # third party from faker import Faker +import numpy as np import pytest # syft absolute import syft as sy -from syft.abstract_node import NodeSideType -from syft.client.domain_client import DomainClient -from syft.node.worker import Worker +from syft import Dataset +from syft.abstract_server import ServerSideType +from syft.client.datasite_client import DatasiteClient from syft.protocol.data_protocol import get_data_protocol from syft.protocol.data_protocol import protocol_release_dir from syft.protocol.data_protocol import stage_protocol_changes +from syft.server.worker import Worker from syft.service.user import user # relative @@ -113,10 +115,11 @@ def stage_protocol(protocol_file: Path): dp.save_history(dp.protocol_history) # Cleanup release dir, remove unused released files - for _file_path in protocol_release_dir().iterdir(): - for version in dp.read_json(_file_path): - if version not in dp.protocol_history.keys(): - _file_path.unlink() + if os.path.exists(protocol_release_dir()): + for _file_path in protocol_release_dir().iterdir(): + for version in dp.read_json(_file_path): + if version not in dp.protocol_history.keys(): + _file_path.unlink() @pytest.fixture @@ -134,7 +137,7 @@ def worker() -> Worker: @pytest.fixture(scope="function") def second_worker() -> Worker: - # Used in node syncing tests + # Used in server syncing tests 
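For context, the worker-and-dataset pattern these conftest.py fixtures rely on is small enough to sketch inline. The names and array values below are invented; the ServerSideType and Dataset imports follow the new paths used in this file.

```python
# Illustrative only: the high-side / low-side worker and dataset pattern
# used by the fixtures in this conftest.py. Names and values are made up.
from secrets import token_hex

import numpy as np

import syft as sy
from syft.abstract_server import ServerSideType

high = sy.Worker.named(name=token_hex(8), server_side_type=ServerSideType.HIGH_SIDE)
low = sy.Worker.named(name=token_hex(8), server_side_type=ServerSideType.LOW_SIDE)

dataset = sy.Dataset(
    name="small_dataset",
    asset_list=[
        sy.Asset(name="small_dataset", data=np.array([1, 2, 3]), mock=np.array([1, 1, 1]))
    ],
)
high.root_client.upload_dataset(dataset)

high.cleanup()
low.cleanup()
```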
worker = sy.Worker.named(name=token_hex(8)) yield worker worker.cleanup() @@ -143,7 +146,9 @@ def second_worker() -> Worker: @pytest.fixture(scope="function") def high_worker() -> Worker: - worker = sy.Worker.named(name=token_hex(8), node_side_type=NodeSideType.HIGH_SIDE) + worker = sy.Worker.named( + name=token_hex(8), server_side_type=ServerSideType.HIGH_SIDE + ) yield worker worker.cleanup() del worker @@ -151,14 +156,16 @@ def high_worker() -> Worker: @pytest.fixture(scope="function") def low_worker() -> Worker: - worker = sy.Worker.named(name=token_hex(8), node_side_type=NodeSideType.LOW_SIDE) + worker = sy.Worker.named( + name=token_hex(8), server_side_type=ServerSideType.LOW_SIDE + ) yield worker worker.cleanup() del worker @pytest.fixture -def root_domain_client(worker) -> DomainClient: +def root_datasite_client(worker) -> DatasiteClient: yield worker.root_client @@ -168,7 +175,7 @@ def root_verify_key(worker): @pytest.fixture -def guest_client(worker) -> DomainClient: +def guest_client(worker) -> DatasiteClient: yield worker.guest_client @@ -178,8 +185,8 @@ def guest_verify_key(worker): @pytest.fixture -def guest_domain_client(root_domain_client) -> DomainClient: - yield root_domain_client.guest() +def guest_datasite_client(root_datasite_client) -> DatasiteClient: + yield root_datasite_client.guest() @pytest.fixture @@ -246,6 +253,39 @@ def patched_user(monkeypatch): ) +@pytest.fixture +def small_dataset() -> Dataset: + dataset = Dataset( + name="small_dataset", + asset_list=[ + sy.Asset( + name="small_dataset", + data=np.array([1, 2, 3]), + mock=np.array([1, 1, 1]), + ) + ], + ) + yield dataset + + +@pytest.fixture +def big_dataset() -> Dataset: + num_elements = 20 * 1024 * 1024 + data_big = np.random.randint(0, 100, size=num_elements) + mock_big = np.random.randint(0, 100, size=num_elements) + dataset = Dataset( + name="big_dataset", + asset_list=[ + sy.Asset( + name="big_dataset", + data=data_big, + mock=mock_big, + ) + ], + ) + yield dataset + + __all__ = [ "mongo_store_partition", "mongo_document_store", diff --git a/packages/syft/tests/syft/action_test.py b/packages/syft/tests/syft/action_test.py index 79a6d115644..c7829daa9d3 100644 --- a/packages/syft/tests/syft/action_test.py +++ b/packages/syft/tests/syft/action_test.py @@ -9,10 +9,9 @@ # syft absolute from syft import ActionObject from syft.client.api import SyftAPICall -from syft.node.worker import Worker +from syft.server.worker import Worker from syft.service.action.action_object import Action from syft.service.response import SyftError -from syft.service.user.user import UserUpdate from syft.service.user.user_roles import ServiceRole from syft.types.uid import LineageID @@ -20,12 +19,13 @@ from ..utils.custom_markers import currently_fail_on_python_3_12 +@pytest.mark.skip(reason="Disabled until we bring back eager execution") def test_actionobject_method(worker): - root_domain_client = worker.root_client - assert root_domain_client.settings.enable_eager_execution(enable=True) + root_datasite_client = worker.root_client + assert root_datasite_client.settings.enable_eager_execution(enable=True) action_store = worker.get_service("actionservice").store obj = ActionObject.from_obj("abc") - pointer = obj.send(root_domain_client) + pointer = obj.send(root_datasite_client) assert len(action_store.data) == 1 res = pointer.capitalize() assert len(action_store.data) == 2 @@ -58,12 +58,10 @@ def test_new_admin_has_action_object_permission( admin = root_client.login(email=email, password=pw) - 
root_client.api.services.user.update( - admin.me.id, UserUpdate(role=ServiceRole.ADMIN) - ) + root_client.api.services.user.update(uid=admin.account.id, role=ServiceRole.ADMIN) if delete_original_admin: - res = root_client.api.services.user.delete(root_client.me.id) + res = root_client.api.services.user.delete(root_client.account.id) assert not isinstance(res, SyftError) assert admin.api.services.action.get(obj.id) == obj @@ -71,8 +69,8 @@ def test_new_admin_has_action_object_permission( @currently_fail_on_python_3_12(raises=AttributeError) def test_lib_function_action(worker): - root_domain_client = worker.root_client - numpy_client = root_domain_client.api.lib.numpy + root_datasite_client = worker.root_client + numpy_client = root_datasite_client.api.lib.numpy res = numpy_client.zeros_like([1, 2, 3]) assert isinstance(res, ActionObject) @@ -81,13 +79,13 @@ def test_lib_function_action(worker): def test_call_lib_function_action2(worker): - root_domain_client = worker.root_client - assert root_domain_client.api.lib.numpy.add(1, 2) == 3 + root_datasite_client = worker.root_client + assert root_datasite_client.api.lib.numpy.add(1, 2) == 3 def test_lib_class_init_action(worker): - root_domain_client = worker.root_client - numpy_client = root_domain_client.api.lib.numpy + root_datasite_client = worker.root_client + numpy_client = root_datasite_client.api.lib.numpy res = numpy_client.float32(4.0) assert isinstance(res, ActionObject) @@ -96,9 +94,9 @@ def test_lib_class_init_action(worker): def test_call_lib_wo_permission(worker): - root_domain_client = worker.root_client + root_datasite_client = worker.root_client fname = ActionObject.from_obj("my_fake_file") - obj1_pointer = fname.send(root_domain_client) + obj1_pointer = fname.send(root_datasite_client) action = Action( path="numpy", op="fromfile", @@ -108,17 +106,17 @@ def test_call_lib_wo_permission(worker): ) kwargs = {"action": action} api_call = SyftAPICall( - node_uid=worker.id, path="action.execute", args=[], kwargs=kwargs + server_uid=worker.id, path="action.execute", args=[], kwargs=kwargs ) - res = root_domain_client.api.make_call(api_call) + res = root_datasite_client.api.make_call(api_call) assert isinstance(res, SyftError) def test_call_lib_custom_signature(worker): - root_domain_client = worker.root_client + root_datasite_client = worker.root_client # concatenate has a manually set signature assert all( - root_domain_client.api.lib.numpy.concatenate( + root_datasite_client.api.lib.numpy.concatenate( ([1, 2, 3], [4, 5, 6]) ).syft_action_data == np.array([1, 2, 3, 4, 5, 6]) @@ -159,7 +157,7 @@ def test_call_lib_custom_signature(worker): # return action_service_execute_method(context, action) # with mock.patch( -# "syft.core.node.new.action_object.ActionObjectPointer.execute_action", mock_func +# "syft.core.server.new.action_object.ActionObjectPointer.execute_action", mock_func # ): # result = pointer1 + pointer2 diff --git a/packages/syft/tests/syft/api_test.py b/packages/syft/tests/syft/api_test.py index 180060caffb..fd3d9f7339b 100644 --- a/packages/syft/tests/syft/api_test.py +++ b/packages/syft/tests/syft/api_test.py @@ -8,12 +8,11 @@ # syft absolute import syft as sy from syft.service.response import SyftAttributeError -from syft.service.user.user import UserUpdate from syft.service.user.user_roles import ServiceRole def test_api_cache_invalidation(worker): - root_domain_client = worker.root_client + root_datasite_client = worker.root_client dataset = sy.Dataset( name="test", asset_list=[ @@ -25,8 +24,8 @@ def 
test_api_cache_invalidation(worker): ) ], ) - root_domain_client.upload_dataset(dataset) - asset = root_domain_client.datasets[0].assets[0] + root_datasite_client.upload_dataset(dataset) + asset = root_datasite_client.datasets[0].assets[0] @sy.syft_function( input_policy=sy.ExactMatch(x=asset), @@ -35,9 +34,9 @@ def test_api_cache_invalidation(worker): def my_func(x): return x + 1 - assert root_domain_client.code.request_code_execution(my_func) + assert root_datasite_client.code.request_code_execution(my_func) # check that function is added to api without refreshing the api manually - assert isinstance(root_domain_client.code.my_func, Callable) + assert isinstance(root_datasite_client.code.my_func, Callable) def test_api_cache_invalidation_login(root_verify_key, worker): @@ -62,12 +61,10 @@ def get_role(verify_key): with pytest.raises(SyftAttributeError): assert guest_client.upload_dataset(dataset) - assert guest_client.api.services.user.update( - user_id, UserUpdate(user_id=user_id, name="abcdef") - ) + assert guest_client.api.services.user.update(uid=user_id, name="abcdef") assert worker.root_client.api.services.user.update( - user_id, UserUpdate(user_id=user_id, role=ServiceRole.DATA_OWNER) + uid=user_id, role=ServiceRole.DATA_OWNER ) assert get_role(guest_client.credentials.verify_key) == ServiceRole.DATA_OWNER diff --git a/packages/syft/tests/syft/assets_test.py b/packages/syft/tests/syft/assets_test.py index 498094b9300..acc2297f1df 100644 --- a/packages/syft/tests/syft/assets_test.py +++ b/packages/syft/tests/syft/assets_test.py @@ -9,7 +9,7 @@ def test_load_assets(): - png = load_png_base64("logo.png") + png = load_png_base64("small-syft-symbol-logo.png") assert isinstance(png, str) with pytest.raises(FileNotFoundError): diff --git a/packages/syft/tests/syft/blob_storage/blob_storage_test.py b/packages/syft/tests/syft/blob_storage/blob_storage_test.py index 1889004a47e..ebcaf0235eb 100644 --- a/packages/syft/tests/syft/blob_storage/blob_storage_test.py +++ b/packages/syft/tests/syft/blob_storage/blob_storage_test.py @@ -9,7 +9,9 @@ # syft absolute import syft as sy from syft import ActionObject -from syft.client.domain_client import DomainClient +from syft import Dataset +from syft import Worker +from syft.client.datasite_client import DatasiteClient from syft.service.blob_storage.util import can_upload_to_blob_storage from syft.service.blob_storage.util import min_size_for_blob_storage_upload from syft.service.context import AuthedServiceContext @@ -25,7 +27,7 @@ @pytest.fixture def authed_context(worker): - yield AuthedServiceContext(node=worker, credentials=worker.signing_key.verify_key) + yield AuthedServiceContext(server=worker, credentials=worker.signing_key.verify_key) @pytest.fixture(scope="function") @@ -45,7 +47,7 @@ def test_blob_storage_write(): worker = sy.Worker.named(name=name) blob_storage = worker.get_service("BlobStorageService") authed_context = AuthedServiceContext( - node=worker, credentials=worker.signing_key.verify_key + server=worker, credentials=worker.signing_key.verify_key ) blob_data = CreateBlobStorageEntry.from_obj(data) blob_deposit = blob_storage.allocate(authed_context, blob_data) @@ -63,7 +65,7 @@ def test_blob_storage_write_syft_object(): worker = sy.Worker.named(name=name) blob_storage = worker.get_service("BlobStorageService") authed_context = AuthedServiceContext( - node=worker, credentials=worker.signing_key.verify_key + server=worker, credentials=worker.signing_key.verify_key ) blob_data = CreateBlobStorageEntry.from_obj(data) blob_deposit = 
blob_storage.allocate(authed_context, blob_data) @@ -81,7 +83,7 @@ def test_blob_storage_read(): worker = sy.Worker.named(name=name) blob_storage = worker.get_service("BlobStorageService") authed_context = AuthedServiceContext( - node=worker, credentials=worker.signing_key.verify_key + server=worker, credentials=worker.signing_key.verify_key ) blob_data = CreateBlobStorageEntry.from_obj(data) blob_deposit = blob_storage.allocate(authed_context, blob_data) @@ -111,13 +113,13 @@ def test_action_obj_send_save_to_blob_storage(worker): data_small: np.ndarray = np.array([1, 2, 3]) action_obj = ActionObject.from_obj(data_small) assert action_obj.dtype == data_small.dtype - root_client: DomainClient = worker.root_client + root_client: DatasiteClient = worker.root_client action_obj.send(root_client) assert action_obj.syft_blob_storage_entry_id is None # big object that should be saved to blob storage assert min_size_for_blob_storage_upload(root_client.api.metadata) == 16 - num_elements = 50 * 1024 * 1024 + num_elements = 20 * 1024 * 1024 data_big = np.random.randint(0, 100, size=num_elements) # 4 bytes per int32 action_obj_2 = ActionObject.from_obj(data_big) assert can_upload_to_blob_storage(action_obj_2, root_client.api.metadata) @@ -125,7 +127,7 @@ def test_action_obj_send_save_to_blob_storage(worker): assert isinstance(action_obj_2.syft_blob_storage_entry_id, sy.UID) # get back the object from blob storage to check if it is the same root_authed_ctx = AuthedServiceContext( - node=worker, credentials=root_client.verify_key + server=worker, credentials=root_client.verify_key ) blob_storage = worker.get_service("BlobStorageService") syft_retrieved_data = blob_storage.read( @@ -135,44 +137,14 @@ def test_action_obj_send_save_to_blob_storage(worker): assert all(syft_retrieved_data.read() == data_big) -def test_upload_dataset_save_to_blob_storage(worker): - root_client: DomainClient = worker.root_client - root_authed_ctx = AuthedServiceContext( - node=worker, credentials=root_client.verify_key - ) - dataset = sy.Dataset( - name="small_dataset", - asset_list=[ - sy.Asset( - name="small_dataset", - data=np.array([1, 2, 3]), - mock=np.array([1, 1, 1]), - ) - ], - ) - root_client.upload_dataset(dataset) - blob_storage = worker.get_service("BlobStorageService") - assert len(blob_storage.get_all_blob_storage_entries(context=root_authed_ctx)) == 0 - - num_elements = 50 * 1024 * 1024 - data_big = np.random.randint(0, 100, size=num_elements) - dataset_big = sy.Dataset( - name="big_dataset", - asset_list=[ - sy.Asset( - name="big_dataset", - data=data_big, - mock=np.array([1, 1, 1]), - ) - ], - ) - root_client.upload_dataset(dataset_big) - # the private data should be saved to the blob storage - blob_entries: list = blob_storage.get_all_blob_storage_entries( - context=root_authed_ctx - ) - assert len(blob_entries) == 1 - data_big_retrieved: SyftObjectRetrieval = blob_storage.read( - context=root_authed_ctx, uid=blob_entries[0].id - ) - assert all(data_big_retrieved.read() == data_big) +def test_upload_dataset_save_to_blob_storage( + worker: Worker, big_dataset: Dataset, small_dataset: Dataset +) -> None: + root_client: DatasiteClient = worker.root_client + # the small dataset should not be saved to the blob storage + root_client.upload_dataset(small_dataset) + assert len(root_client.api.services.blob_storage.get_all()) == 0 + + # the big dataset should be saved to the blob storage + root_client.upload_dataset(big_dataset) + assert len(root_client.api.services.blob_storage.get_all()) == 2 diff --git 
a/packages/syft/tests/syft/custom_worker/config_test.py b/packages/syft/tests/syft/custom_worker/config_test.py index 76a353e2d3b..3777be6e222 100644 --- a/packages/syft/tests/syft/custom_worker/config_test.py +++ b/packages/syft/tests/syft/custom_worker/config_test.py @@ -166,7 +166,7 @@ def test_load_custom_worker_config( DOCKER_METHODS = ["from_str", "from_path"] DOCKER_CONFIG_OPENDP = f""" - FROM openmined/grid-backend:{sy.__version__} + FROM openmined/syft-backend:{sy.__version__} RUN pip install opendp """ diff --git a/packages/syft/tests/syft/dataset/fixtures.py b/packages/syft/tests/syft/dataset/fixtures.py index ca57f7e9220..9c062e756bc 100644 --- a/packages/syft/tests/syft/dataset/fixtures.py +++ b/packages/syft/tests/syft/dataset/fixtures.py @@ -30,7 +30,7 @@ def mock_dataset_stash(document_store) -> DatasetStash: @pytest.fixture -def mock_asset(worker, root_domain_client) -> Asset: +def mock_asset(worker, root_datasite_client) -> Asset: # sometimes the access rights for client are overwritten # so we need to assing the root_client manually uploader = Contributor( @@ -44,18 +44,18 @@ def mock_asset(worker, root_domain_client) -> Asset: data=np.array([0, 1, 2, 3, 4]), mock=np.array([0, 1, 1, 1, 1]), mock_is_real=False, - node_uid=worker.id, + server_uid=worker.id, uploader=uploader, contributors=[uploader], - syft_node_location=worker.id, - syft_client_verify_key=root_domain_client.credentials.verify_key, + syft_server_location=worker.id, + syft_client_verify_key=root_datasite_client.credentials.verify_key, ) - node_transform_context = TransformContext( - node=worker, - credentials=root_domain_client.credentials.verify_key, + server_transform_context = TransformContext( + server=worker, + credentials=root_datasite_client.credentials.verify_key, obj=create_asset, ) - mock_asset = create_asset.to(Asset, context=node_transform_context) + mock_asset = create_asset.to(Asset, context=server_transform_context) yield mock_asset diff --git a/packages/syft/tests/syft/eager_test.py b/packages/syft/tests/syft/eager_test.py index 7640e295cdc..78f24d4e3da 100644 --- a/packages/syft/tests/syft/eager_test.py +++ b/packages/syft/tests/syft/eager_test.py @@ -1,5 +1,6 @@ # third party import numpy as np +import pytest # syft absolute from syft.service.action.action_object import ActionObject @@ -10,9 +11,12 @@ from ..utils.custom_markers import currently_fail_on_python_3_12 +@pytest.mark.skip(reason="Disabled until we bring back eager execution") def test_eager_permissions(worker, guest_client): - root_domain_client = worker.root_client - assert root_domain_client.settings.enable_eager_execution(enable=True) + root_datasite_client = worker.root_client + + assert root_datasite_client.settings.enable_eager_execution(enable=True) + guest_client = worker.guest_client input_obj = TwinObject( @@ -20,11 +24,11 @@ def test_eager_permissions(worker, guest_client): mock_obj=np.array([[1, 1, 1], [1, 1, 1]]), ) - input_ptr = input_obj.send(root_domain_client) + input_ptr = input_obj.send(root_datasite_client) pointer = guest_client.api.services.action.get_pointer(input_ptr.id) - input_ptr = input_obj.send(root_domain_client) + input_ptr = input_obj.send(root_datasite_client) pointer = guest_client.api.services.action.get_pointer(input_ptr.id) @@ -32,13 +36,16 @@ def test_eager_permissions(worker, guest_client): res_guest = guest_client.api.services.action.get(flat_ptr.id) assert not isinstance(res_guest, ActionObject) - res_root = root_domain_client.api.services.action.get(flat_ptr.id) + res_root = 
root_datasite_client.api.services.action.get(flat_ptr.id) assert all(res_root == [3, 3, 3, 3, 3, 3]) +@pytest.mark.skip(reason="Disabled until we bring back eager execution") def test_plan(worker): - root_domain_client = worker.root_client - assert root_domain_client.settings.enable_eager_execution(enable=True) + root_datasite_client = worker.root_client + + assert root_datasite_client.settings.enable_eager_execution(enable=True) + guest_client = worker.guest_client @planify @@ -53,7 +60,7 @@ def my_plan(x=np.array([[2, 2, 2], [2, 2, 2]])): # noqa: B008 mock_obj=np.array([[1, 1, 1], [1, 1, 1]]), ) - input_ptr = input_obj.send(root_domain_client) + input_ptr = input_obj.send(root_datasite_client) pointer = guest_client.api.services.action.get_pointer(input_ptr.id) res_ptr = plan_ptr(x=pointer) @@ -65,7 +72,7 @@ def my_plan(x=np.array([[2, 2, 2], [2, 2, 2]])): # noqa: B008 # root can access result assert ( - root_domain_client.api.services.action.get(res_ptr.id) + root_datasite_client.api.services.action.get(res_ptr.id) == np.array([[3, 3, 3], [3, 3, 3]]).flatten().prod() ) @@ -73,15 +80,20 @@ def my_plan(x=np.array([[2, 2, 2], [2, 2, 2]])): # noqa: B008 res_ptr.request(guest_client) # root approves result - root_domain_client.api.services.request[-1].approve_with_client(root_domain_client) + root_datasite_client.api.services.request[-1].approve_with_client( + root_datasite_client + ) assert res_ptr.get_from(guest_client) == 729 +@pytest.mark.skip(reason="Disabled until we bring back eager execution") @currently_fail_on_python_3_12(raises=AttributeError) def test_plan_with_function_call(worker, guest_client): - root_domain_client = worker.root_client - assert root_domain_client.settings.enable_eager_execution(enable=True) + root_datasite_client = worker.root_client + + assert root_datasite_client.settings.enable_eager_execution(enable=True) + guest_client = worker.guest_client @planify @@ -96,16 +108,19 @@ def my_plan(x=np.array([[2, 2, 2], [2, 2, 2]])): # noqa: B008 mock_obj=np.array([[1, 1, 1], [1, 1, 1]]), ) - input_obj = input_obj.send(root_domain_client) + input_obj = input_obj.send(root_datasite_client) pointer = guest_client.api.services.action.get_pointer(input_obj.id) res_ptr = plan_ptr(x=pointer) - assert root_domain_client.api.services.action.get(res_ptr.id) == 18 + assert root_datasite_client.api.services.action.get(res_ptr.id) == 18 +@pytest.mark.skip(reason="Disabled until we bring back eager execution") def test_plan_with_object_instantiation(worker, guest_client): - root_domain_client = worker.root_client - assert root_domain_client.settings.enable_eager_execution(enable=True) + root_datasite_client = worker.root_client + + assert root_datasite_client.settings.enable_eager_execution(enable=True) + guest_client = worker.guest_client @planify @@ -118,20 +133,23 @@ def my_plan(x=np.array([1, 2, 3, 4, 5, 6])): # noqa: B008 private_obj=np.array([1, 2, 3, 4, 5, 6]), mock_obj=np.array([1, 1, 1, 1, 1, 1]) ) - _id = input_obj.send(root_domain_client).id + _id = input_obj.send(root_datasite_client).id pointer = guest_client.api.services.action.get_pointer(_id) res_ptr = plan_ptr(x=pointer) assert all( - root_domain_client.api.services.action.get(res_ptr.id).syft_action_data + root_datasite_client.api.services.action.get(res_ptr.id).syft_action_data == np.array([2, 3, 4, 5, 6, 7]) ) +@pytest.mark.skip(reason="Disabled until we bring back eager execution") def test_setattribute(worker, guest_client): - root_domain_client = worker.root_client - assert 
root_domain_client.settings.enable_eager_execution(enable=True) + root_datasite_client = worker.root_client + + assert root_datasite_client.settings.enable_eager_execution(enable=True) + guest_client = worker.guest_client private_data, mock_data = ( @@ -143,7 +161,7 @@ def test_setattribute(worker, guest_client): assert private_data.dtype != np.int32 - obj_pointer = obj.send(root_domain_client) + obj_pointer = obj.send(root_datasite_client) obj_pointer = guest_client.api.services.action.get_pointer(obj_pointer.id) original_id = obj_pointer.id @@ -155,7 +173,7 @@ def test_setattribute(worker, guest_client): assert obj_pointer.id.id in worker.action_store.data assert obj_pointer.id != original_id - res = root_domain_client.api.services.action.get(obj_pointer.id) + res = root_datasite_client.api.services.action.get(obj_pointer.id) # check if updated assert res.dtype == np.int32 @@ -168,9 +186,10 @@ def test_setattribute(worker, guest_client): assert not (obj_pointer.syft_action_data == private_data).all() +@pytest.mark.skip(reason="Disabled until we bring back eager execution") def test_getattribute(worker, guest_client): - root_domain_client = worker.root_client - assert root_domain_client.settings.enable_eager_execution(enable=True) + root_datasite_client = worker.root_client + assert root_datasite_client.settings.enable_eager_execution(enable=True) guest_client = worker.guest_client obj = TwinObject( @@ -178,18 +197,19 @@ def test_getattribute(worker, guest_client): mock_obj=np.array([[1, 1, 1], [1, 1, 1]]), ) - obj_pointer = obj.send(root_domain_client) + obj_pointer = obj.send(root_datasite_client) obj_pointer = guest_client.api.services.action.get_pointer(obj_pointer.id) size_pointer = obj_pointer.size # check result assert size_pointer.id.id in worker.action_store.data - assert root_domain_client.api.services.action.get(size_pointer.id) == 6 + assert root_datasite_client.api.services.action.get(size_pointer.id) == 6 +@pytest.mark.skip(reason="Disabled until we bring back eager execution") def test_eager_method(worker, guest_client): - root_domain_client = worker.root_client - assert root_domain_client.settings.enable_eager_execution(enable=True) + root_datasite_client = worker.root_client + assert root_datasite_client.settings.enable_eager_execution(enable=True) guest_client = worker.guest_client obj = TwinObject( @@ -197,7 +217,7 @@ def test_eager_method(worker, guest_client): mock_obj=np.array([[1, 1, 1], [1, 1, 1]]), ) - obj_pointer = obj.send(root_domain_client) + obj_pointer = obj.send(root_datasite_client) obj_pointer = guest_client.api.services.action.get_pointer(obj_pointer.id) flat_pointer = obj_pointer.flatten() @@ -205,14 +225,15 @@ def test_eager_method(worker, guest_client): assert flat_pointer.id.id in worker.action_store.data # check result assert all( - root_domain_client.api.services.action.get(flat_pointer.id) + root_datasite_client.api.services.action.get(flat_pointer.id) == np.array([1, 2, 3, 4, 5, 6]) ) +@pytest.mark.skip(reason="Disabled until we bring back eager execution") def test_eager_dunder_method(worker, guest_client): - root_domain_client = worker.root_client - assert root_domain_client.settings.enable_eager_execution(enable=True) + root_datasite_client = worker.root_client + assert root_datasite_client.settings.enable_eager_execution(enable=True) guest_client = worker.guest_client obj = TwinObject( @@ -220,7 +241,7 @@ def test_eager_dunder_method(worker, guest_client): mock_obj=np.array([[1, 1, 1], [1, 1, 1]]), ) - obj_pointer = 
obj.send(root_domain_client) + obj_pointer = obj.send(root_datasite_client) obj_pointer = guest_client.api.services.action.get_pointer(obj_pointer.id) first_row_pointer = obj_pointer[0] @@ -228,6 +249,6 @@ def test_eager_dunder_method(worker, guest_client): assert first_row_pointer.id.id in worker.action_store.data # check result assert all( - root_domain_client.api.services.action.get(first_row_pointer.id) + root_datasite_client.api.services.action.get(first_row_pointer.id) == np.array([1, 2, 3]) ) diff --git a/packages/syft/tests/syft/grid_url_test.py b/packages/syft/tests/syft/grid_url_test.py deleted file mode 100644 index ef93d15e783..00000000000 --- a/packages/syft/tests/syft/grid_url_test.py +++ /dev/null @@ -1,27 +0,0 @@ -# third party -import pytest - -# syft absolute -from syft.types.grid_url import GridURL - -test_suite = [ - ("http://0.0.0.0", 8081, "http://0.0.0.0:8081"), - ("http://0.0.0.0", None, "http://0.0.0.0:80"), - (None, None, "http://localhost:80"), - ("http://0.0.0.0:8081", 8082, "http://0.0.0.0:8081"), - ("0.0.0.0:8081", None, "http://0.0.0.0:8081"), - ("domainname.com", None, "http://domainname.com:80"), - ("https://domainname.com", None, "https://domainname.com:80"), -] - - -@pytest.mark.parametrize("url, port, ground_truth", test_suite) -def test_grid_url(url, port, ground_truth) -> None: - if not url and not port: - assert GridURL().base_url == ground_truth - elif not url: - assert GridURL(port=port).base_url == ground_truth - elif not port: - assert GridURL(host_or_ip=url).base_url == ground_truth - else: - assert GridURL(host_or_ip=url, port=port).base_url == ground_truth diff --git a/packages/syft/tests/syft/hash_test.py b/packages/syft/tests/syft/hash_test.py index df97de4a19e..bfdf99219fa 100644 --- a/packages/syft/tests/syft/hash_test.py +++ b/packages/syft/tests/syft/hash_test.py @@ -3,12 +3,16 @@ # syft absolute from syft.serde.serializable import serializable -from syft.types.syft_object import SYFT_OBJECT_VERSION_2 +from syft.types.syft_object import SYFT_OBJECT_VERSION_1 from syft.types.syft_object import SyftBaseObject from syft.types.syft_object import SyftHashableObject -@serializable(attrs=["key", "value", "flag"]) +@serializable( + attrs=["key", "value", "flag"], + canonical_name="MockObject", + version=1, +) class MockObject(SyftHashableObject): key: str value: str @@ -26,7 +30,7 @@ def __init__(self, key, value, flag=None): @serializable(attrs=["id", "data"]) class MockWrapper(SyftBaseObject, SyftHashableObject): __canonical_name__ = "MockWrapper" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 id: str data: MockObject | None diff --git a/packages/syft/tests/syft/migrations/data_migration_test.py b/packages/syft/tests/syft/migrations/data_migration_test.py index 9d4b0e7c5f7..6f3f1aca96a 100644 --- a/packages/syft/tests/syft/migrations/data_migration_test.py +++ b/packages/syft/tests/syft/migrations/data_migration_test.py @@ -9,7 +9,7 @@ # syft absolute import syft as sy -from syft.client.domain_client import DomainClient +from syft.client.datasite_client import DatasiteClient from syft.service.migration.object_migration_state import MigrationData from syft.service.response import SyftError from syft.service.response import SyftSuccess @@ -52,7 +52,7 @@ def create_dataset(client): return dataset -def make_request(client: DomainClient) -> DomainClient: +def make_request(client: DatasiteClient) -> DatasiteClient: @sy.syft_function_single_use() def compute() -> int: return 42 @@ -60,7 +60,7 @@ def compute() -> int: _ = 
client.code.request_code_execution(compute) -def prepare_data(client: DomainClient) -> None: +def prepare_data(client: DatasiteClient) -> None: # Create DS, upload dataset, create + approve + execute single request ds_client = register_ds(client) create_dataset(client) @@ -124,12 +124,12 @@ def named_worker_context(name): def test_data_migration_same_version(tmp_path): - node_name = secrets.token_hex(8) + server_name = secrets.token_hex(8) blob_path = tmp_path / "migration.blob" yaml_path = tmp_path / "migration.yaml" # Setup + save migration data - with named_worker_context(node_name) as first_worker: + with named_worker_context(server_name) as first_worker: prepare_data(first_worker.root_client) first_migration_data = first_worker.root_client.get_migration_data() first_migration_data.save(blob_path, yaml_path) @@ -140,15 +140,15 @@ def test_data_migration_same_version(tmp_path): assert isinstance(result, SyftError) # Load migration data on correct worker - # NOTE worker is correct because admin keys and node id are derived from node name, + # NOTE worker is correct because admin keys and server id are derived from server name, # so they match the first worker - with named_worker_context(node_name) as migration_worker: + with named_worker_context(server_name) as migration_worker: client = migration_worker.root_client # DB is new, no DS registered yet assert len(client.users.get_all()) == 1 - assert migration_worker.id == first_migration_data.node_uid + assert migration_worker.id == first_migration_data.server_uid assert migration_worker.verify_key == first_migration_data.root_verify_key result = migration_worker.root_client.load_migration_data(blob_path) diff --git a/packages/syft/tests/syft/migrations/protocol_communication_test.py b/packages/syft/tests/syft/migrations/protocol_communication_test.py index b2b7f5a15e9..d912085095d 100644 --- a/packages/syft/tests/syft/migrations/protocol_communication_test.py +++ b/packages/syft/tests/syft/migrations/protocol_communication_test.py @@ -1,5 +1,6 @@ # stdlib from copy import deepcopy +import os from pathlib import Path from unittest import mock @@ -8,12 +9,12 @@ # syft absolute import syft as sy -from syft.node.worker import Worker from syft.protocol.data_protocol import get_data_protocol from syft.protocol.data_protocol import protocol_release_dir from syft.protocol.data_protocol import stage_protocol_changes from syft.serde.recursive import TYPE_BANK from syft.serde.serializable import serializable +from syft.server.worker import Worker from syft.service.context import AuthedServiceContext from syft.service.response import SyftError from syft.service.service import AbstractService @@ -24,7 +25,7 @@ from syft.store.document_store import DocumentStore from syft.store.document_store import PartitionSettings from syft.types.syft_migration import migrate -from syft.types.syft_object import SYFT_OBJECT_VERSION_2 +from syft.types.syft_object import SYFT_OBJECT_VERSION_1 from syft.types.syft_object import SyftBaseObject from syft.types.syft_object import SyftObject from syft.types.transforms import convert_types @@ -39,7 +40,7 @@ def get_klass_version_1(): @serializable() class SyftMockObjectTestV1(SyftObject): __canonical_name__ = "SyftMockObjectTest" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = SYFT_OBJECT_VERSION_1 id: UID name: str @@ -52,7 +53,7 @@ def get_klass_version_2(): @serializable() class SyftMockObjectTestV2(SyftObject): __canonical_name__ = "SyftMockObjectTest" - __version__ = SYFT_OBJECT_VERSION_2 + __version__ = 
SYFT_OBJECT_VERSION_1 id: UID full_name: str @@ -74,7 +75,10 @@ def mock_v2_to_v1(): def get_stash_klass(syft_object: type[SyftBaseObject]): - @serializable() + @serializable( + canonical_name="SyftMockObjectStash", + version=1, + ) class SyftMockObjectStash(BaseStash): object_type = syft_object settings: PartitionSettings = PartitionSettings( @@ -91,7 +95,10 @@ def __init__(self, store: DocumentStore) -> None: def setup_service_method(syft_object): stash_klass: BaseStash = get_stash_klass(syft_object=syft_object) - @serializable() + @serializable( + canonical_name="SyftMockObjectService", + version=1, + ) class SyftMockObjectService(AbstractService): store: DocumentStore stash: stash_klass @@ -115,7 +122,7 @@ def get(self, context: AuthedServiceContext) -> list[syft_object] | SyftError: return SyftMockObjectService -def setup_version_one(node_name: str): +def setup_version_one(server_name: str): syft_klass_version_one = get_klass_version_1() sy.stage_protocol_changes() sy.bump_protocol_version() @@ -124,23 +131,23 @@ def setup_version_one(node_name: str): syft_object=syft_klass_version_one, ) - node = sy.orchestra.launch(node_name, dev_mode=True, reset=True) + server = sy.orchestra.launch(server_name, dev_mode=True, reset=True) - worker: Worker = node.python_node + worker: Worker = server.python_server worker.services.append(syft_service_klass) worker.service_path_map[syft_service_klass.__name__.lower()] = syft_service_klass( store=worker.document_store ) - return node, syft_klass_version_one + return server, syft_klass_version_one def mock_syft_version(): return f"{sy.__version__}.dev" -def setup_version_second(node_name: str, klass_version_one: type): +def setup_version_second(server_name: str, klass_version_one: type): syft_klass_version_second = get_klass_version_2() setup_migration_transforms(klass_version_one, syft_klass_version_second) @@ -149,16 +156,16 @@ def setup_version_second(node_name: str, klass_version_one: type): syft_service_klass = setup_service_method(syft_object=syft_klass_version_second) - node = sy.orchestra.launch(node_name, dev_mode=True) + server = sy.orchestra.launch(server_name, dev_mode=True) - worker: Worker = node.python_node + worker: Worker = server.python_server worker.services.append(syft_service_klass) worker.service_path_map[syft_service_klass.__name__.lower()] = syft_service_klass( store=worker.document_store ) - return node, syft_klass_version_second + return server, syft_klass_version_second @pytest.fixture @@ -174,10 +181,11 @@ def my_stage_protocol(protocol_file: Path): dp.save_history(dp.protocol_history) # Cleanup release dir, remove unused released files - for _file_path in protocol_release_dir().iterdir(): - for version in dp.read_json(_file_path): - if version not in dp.protocol_history.keys(): - _file_path.unlink() + if os.path.exists(protocol_release_dir()): + for _file_path in protocol_release_dir().iterdir(): + for version in dp.read_json(_file_path): + if version not in dp.protocol_history.keys(): + _file_path.unlink() @pytest.mark.skip( @@ -192,7 +200,7 @@ def patched_index_syft_by_module_name(fully_qualified_name: str): return index_syft_by_module_name(fully_qualified_name) - node_name = UID().to_string() + server_name = UID().to_string() with mock.patch("syft.serde.recursive.TYPE_BANK", MOCK_TYPE_BANK): with mock.patch( "syft.protocol.data_protocol.TYPE_BANK", @@ -203,7 +211,7 @@ def patched_index_syft_by_module_name(fully_qualified_name: str): patched_index_syft_by_module_name, ): # Setup mock object version one - nh1, klass_v1 = 
setup_version_one(node_name) + nh1, klass_v1 = setup_version_one(server_name) assert klass_v1.__canonical_name__ == "SyftMockObjectTest" assert klass_v1.__name__ == "SyftMockObjectTestV1" @@ -221,7 +229,7 @@ def patched_index_syft_by_module_name(fully_qualified_name: str): "syft.protocol.data_protocol.__version__", mock_syft_version() ): nh2, klass_v2 = setup_version_second( - node_name, klass_version_one=klass_v1 + server_name, klass_version_one=klass_v1 ) # Create a sample data in version second @@ -237,9 +245,11 @@ def patched_index_syft_by_module_name(fully_qualified_name: str): assert sample_data_v1.version == int(sample_data.version) # Set the sample data in version second - service_klass = nh1.python_node.get_service("SyftMockObjectService") + service_klass = nh1.python_server.get_service( + "SyftMockObjectService" + ) service_klass.stash.set( - nh1.python_node.root_client.verify_key, + nh1.python_server.root_client.verify_key, sample_data, ) diff --git a/packages/syft/tests/syft/notebook_ui_test.py b/packages/syft/tests/syft/notebook_ui_test.py index eebc249dc82..7129e7beb74 100644 --- a/packages/syft/tests/syft/notebook_ui_test.py +++ b/packages/syft/tests/syft/notebook_ui_test.py @@ -1,52 +1,78 @@ +# stdlib +from typing import Any + # third party import numpy as np import pytest +import torch # syft absolute +from syft import UID from syft.service.action.action_object import ActionObject from syft.service.user.user import User from syft.util.table import TABLE_INDEX_KEY from syft.util.table import prepare_table_data +def table_displayed(obj_to_check: Any) -> bool: + return "Tabulator" in obj_to_check._repr_html_() + + +def no_html_repr_displayed(obj_to_check: Any) -> bool: + return obj_to_check._repr_html_() is None + + +def obj_repr_displayed(obj_to_check: Any) -> bool: + return obj_to_check._repr_html_() == obj_to_check.__repr__() + + def table_test_cases() -> list[tuple[list, str | None]]: ao_1 = ActionObject.from_obj(10.0) ao_2 = ActionObject.from_obj(20.0) np_ao = ActionObject.from_obj(np.array([10, 20])) + torch_ao = ActionObject.from_obj(torch.tensor([10, 20])) user_1 = User(email="x@y.z") user_2 = User(email="a@b.c") # Makes table - homogenous_ao = ([ao_1, ao_2], True) - non_homogenous_same_repr = ([ao_1, ao_2, np_ao], True) - homogenous_user = ([user_1, user_2], True) - # TODO techdebt: makes table because syft misuses _html_repr_ - empty_list = ([], True) + homogenous_ao = ([ao_1, ao_2], table_displayed) + non_homogenous_same_repr = ([ao_1, ao_2, np_ao], table_displayed) + homogenous_user = ([user_1, user_2], table_displayed) + empty_list = ([], obj_repr_displayed) + non_syft_objs = ([1, 2.0, 3, 4], no_html_repr_displayed) # Doesn't make table - non_homogenous_different_repr = ([ao_1, ao_2, user_1, user_2], False) - non_syft_obj_1 = ([1, ao_1, ao_2], False) - non_syft_obj_2 = ([ao_1, ao_2, 1], False) - + non_homogenous_different_repr = ( + [ao_1, ao_2, user_1, user_2], + no_html_repr_displayed, + ) + non_syft_obj_1 = ([1, ao_1, ao_2], no_html_repr_displayed) + non_syft_obj_2 = ([ao_1, ao_2, 1], no_html_repr_displayed) + torch_type_obj = ( + [type(torch_ao.syft_action_data), 1.0, UID()], + no_html_repr_displayed, + ) return [ homogenous_ao, non_homogenous_same_repr, homogenous_user, empty_list, + non_syft_objs, non_homogenous_different_repr, non_syft_obj_1, non_syft_obj_2, + torch_type_obj, ] @pytest.mark.parametrize("test_case", table_test_cases()) def test_list_dict_repr_html(test_case): - obj, expected = test_case + obj, validation_func = test_case - assert 
(obj._repr_html_() is not None) == expected - assert (dict(enumerate(obj))._repr_html_() is not None) == expected - assert (set(obj)._repr_html_() is not None) == expected - assert (tuple(obj)._repr_html_() is not None) == expected + assert validation_func(obj) + assert validation_func(dict(enumerate(obj))) + assert validation_func(set(obj)) + assert validation_func(tuple(obj)) def test_sort_table_rows(): diff --git a/packages/syft/tests/syft/notifications/fixtures.py b/packages/syft/tests/syft/notifications/fixtures.py index dade06c4424..8a02c0f444f 100644 --- a/packages/syft/tests/syft/notifications/fixtures.py +++ b/packages/syft/tests/syft/notifications/fixtures.py @@ -2,9 +2,9 @@ import pytest # syft absolute -from syft.node.credentials import SyftSigningKey -from syft.node.credentials import SyftVerifyKey -from syft.node.worker import Worker +from syft.server.credentials import SyftSigningKey +from syft.server.credentials import SyftVerifyKey +from syft.server.worker import Worker from syft.service.context import AuthedServiceContext from syft.service.notification.notification_service import NotificationService from syft.service.notification.notification_stash import NotificationStash @@ -34,13 +34,13 @@ def notification_service(document_store): @pytest.fixture def authed_context(admin_user: User, worker: Worker) -> AuthedServiceContext: - yield AuthedServiceContext(credentials=test_verify_key, node=worker) + yield AuthedServiceContext(credentials=test_verify_key, server=worker) @pytest.fixture def linked_object(): yield LinkedObject( - node_uid=UID(), + server_uid=UID(), service_type=NotificationService, object_type=Notification, object_uid=UID(), @@ -57,7 +57,7 @@ def mock_create_notification(faker) -> CreateNotification: mock_notification = CreateNotification( subject="mock_created_notification", id=UID(), - node_uid=UID(), + server_uid=UID(), from_user_verify_key=test_verify_key1, to_user_verify_key=test_verify_key2, created_at=DateTime.now(), @@ -73,7 +73,7 @@ def mock_notification( ) -> Notification: mock_notification = Notification( subject="mock_notification", - node_uid=UID(), + server_uid=UID(), from_user_verify_key=SyftSigningKey.generate().verify_key, to_user_verify_key=SyftSigningKey.generate().verify_key, created_at=DateTime.now(), diff --git a/packages/syft/tests/syft/notifications/notification_service_test.py b/packages/syft/tests/syft/notifications/notification_service_test.py index 868759e348a..806e607193f 100644 --- a/packages/syft/tests/syft/notifications/notification_service_test.py +++ b/packages/syft/tests/syft/notifications/notification_service_test.py @@ -4,8 +4,8 @@ from result import Ok # syft absolute -from syft.node.credentials import SyftSigningKey -from syft.node.credentials import SyftVerifyKey +from syft.server.credentials import SyftSigningKey +from syft.server.credentials import SyftVerifyKey from syft.service.context import AuthedServiceContext from syft.service.notification.notification_service import NotificationService from syft.service.notification.notification_stash import NotificationStash @@ -37,7 +37,7 @@ def add_mock_notification( mock_notification = Notification( subject="mock_notification", - node_uid=UID(), + server_uid=UID(), from_user_verify_key=from_user_verify_key, to_user_verify_key=to_user_verify_key, created_at=DateTime.now(), @@ -566,7 +566,7 @@ def mock_get_service(linked_obj: LinkedObject) -> NotificationService: return test_notification_service monkeypatch.setattr( - authed_context.node, + authed_context.server, 
"get_service", mock_get_service, ) @@ -601,7 +601,7 @@ def mock_get_service(linked_obj: LinkedObject) -> NotificationService: return test_notification_service monkeypatch.setattr( - authed_context.node, + authed_context.server, "get_service", mock_get_service, ) diff --git a/packages/syft/tests/syft/notifications/notification_stash_test.py b/packages/syft/tests/syft/notifications/notification_stash_test.py index 7a2e88c1e93..2cbc3197799 100644 --- a/packages/syft/tests/syft/notifications/notification_stash_test.py +++ b/packages/syft/tests/syft/notifications/notification_stash_test.py @@ -4,8 +4,8 @@ from result import Err # syft absolute -from syft.node.credentials import SyftSigningKey -from syft.node.credentials import SyftVerifyKey +from syft.server.credentials import SyftSigningKey +from syft.server.credentials import SyftVerifyKey from syft.service.notification.notification_stash import ( OrderByCreatedAtTimeStampPartitionKey, ) @@ -40,7 +40,7 @@ def add_mock_notification( mock_notification = Notification( subject="test_notification", - node_uid=UID(), + server_uid=UID(), from_user_verify_key=from_user_verify_key, to_user_verify_key=to_user_verify_key, created_at=DateTime.now(), diff --git a/packages/syft/tests/syft/project/project_test.py b/packages/syft/tests/syft/project/project_test.py index c186f2f35fa..587f9582274 100644 --- a/packages/syft/tests/syft/project/project_test.py +++ b/packages/syft/tests/syft/project/project_test.py @@ -17,7 +17,7 @@ def test_project_creation(worker): password_verify="bazinga", ) - ds_client = sy.login(node=worker, email="sheldon@caltech.edu", password="bazinga") + ds_client = sy.login(server=worker, email="sheldon@caltech.edu", password="bazinga") new_project = sy.Project( name="My Cool Project", description="My Cool Description", members=[ds_client] @@ -70,9 +70,13 @@ def test_exception_different_email(worker): password_verify="penny", ) - ds_sheldon = sy.login(node=worker, email="sheldon@caltech.edu", password="bazinga") + ds_sheldon = sy.login( + server=worker, email="sheldon@caltech.edu", password="bazinga" + ) - ds_leonard = sy.login(node=worker, email="leonard@princeton.edu", password="penny") + ds_leonard = sy.login( + server=worker, email="leonard@princeton.edu", password="penny" + ) with pytest.raises(ValidationError): sy.Project( diff --git a/packages/syft/tests/syft/request/fixtures.py b/packages/syft/tests/syft/request/fixtures.py index c82cb59f4b4..299403ffdbc 100644 --- a/packages/syft/tests/syft/request/fixtures.py +++ b/packages/syft/tests/syft/request/fixtures.py @@ -4,8 +4,8 @@ # syft absolute from syft.client.client import SyftClient -from syft.node.credentials import SyftVerifyKey -from syft.node.worker import Worker +from syft.server.credentials import SyftVerifyKey +from syft.server.worker import Worker from syft.service.context import AuthedServiceContext from syft.service.request.request_stash import RequestStash from syft.store.document_store import DocumentStore @@ -17,8 +17,8 @@ def request_stash(document_store: DocumentStore) -> RequestStash: @pytest.fixture -def authed_context_guest_domain_client( - guest_domain_client: SyftClient, worker: Worker +def authed_context_guest_datasite_client( + guest_datasite_client: SyftClient, worker: Worker ) -> AuthedServiceContext: - verify_key: SyftVerifyKey = guest_domain_client.credentials.verify_key - yield AuthedServiceContext(credentials=verify_key, node=worker) + verify_key: SyftVerifyKey = guest_datasite_client.credentials.verify_key + yield 
AuthedServiceContext(credentials=verify_key, server=worker) diff --git a/packages/syft/tests/syft/request/request_code_accept_deny_test.py b/packages/syft/tests/syft/request/request_code_accept_deny_test.py index b21a06579d1..9308fe260c9 100644 --- a/packages/syft/tests/syft/request/request_code_accept_deny_test.py +++ b/packages/syft/tests/syft/request/request_code_accept_deny_test.py @@ -5,7 +5,7 @@ # syft absolute import syft from syft.client.client import SyftClient -from syft.node.worker import Worker +from syft.server.worker import Worker from syft.service.action.action_object import ActionObject from syft.service.action.action_permissions import ActionPermission from syft.service.code.user_code import UserCode @@ -45,7 +45,7 @@ def get_ds_client(faker: Faker, root_client: SyftClient, guest_client: SyftClien def test_object_mutation(worker: Worker): root_client = worker.root_client setting = root_client.api.services.settings.get() - linked_obj = LinkedObject.from_obj(setting, SettingsService, node_uid=worker.id) + linked_obj = LinkedObject.from_obj(setting, SettingsService, server_uid=worker.id) original_name = setting.organization new_name = "Test Organization" @@ -57,7 +57,7 @@ def test_object_mutation(worker: Worker): ) change_context = ChangeContext( - node=worker, + server=worker, approving_user_credentials=root_client.credentials.verify_key, ) @@ -87,7 +87,7 @@ def test_action_store_change(faker: Faker, worker: Worker): ds_client = get_ds_client(faker, root_client, worker.guest_client) action_object_link = LinkedObject.from_obj( - action_obj, node_uid=action_obj.syft_node_uid + action_obj, server_uid=action_obj.syft_server_uid ) permission_change = ActionStoreChange( linked_obj=action_object_link, @@ -95,7 +95,7 @@ def test_action_store_change(faker: Faker, worker: Worker): ) change_context = ChangeContext( - node=worker, + server=worker, approving_user_credentials=root_client.credentials.verify_key, requesting_user_credentials=ds_client.credentials.verify_key, ) @@ -136,7 +136,7 @@ def simple_function(data): user_code: UserCode = ds_client.code.get_all()[0] - linked_user_code = LinkedObject.from_obj(user_code, node_uid=worker.id) + linked_user_code = LinkedObject.from_obj(user_code, server_uid=worker.id) user_code_change = UserCodeStatusChange( value=UserCodeStatus.APPROVED, @@ -145,7 +145,7 @@ def simple_function(data): ) change_context = ChangeContext( - node=worker, + server=worker, approving_user_credentials=root_client.credentials.verify_key, requesting_user_credentials=ds_client.credentials.verify_key, ) diff --git a/packages/syft/tests/syft/request/request_stash_test.py b/packages/syft/tests/syft/request/request_stash_test.py index c3172083d43..e88685a7aa9 100644 --- a/packages/syft/tests/syft/request/request_stash_test.py +++ b/packages/syft/tests/syft/request/request_stash_test.py @@ -5,7 +5,7 @@ # syft absolute from syft.client.client import SyftClient -from syft.node.credentials import SyftVerifyKey +from syft.server.credentials import SyftVerifyKey from syft.service.context import AuthedServiceContext from syft.service.request.request import Request from syft.service.request.request import SubmitRequest @@ -18,11 +18,11 @@ def test_requeststash_get_all_for_verify_key_no_requests( root_verify_key, request_stash: RequestStash, - guest_domain_client: SyftClient, + guest_datasite_client: SyftClient, ) -> None: # test when there are no requests from a client - verify_key: SyftVerifyKey = guest_domain_client.credentials.verify_key + verify_key: SyftVerifyKey = 
guest_datasite_client.credentials.verify_key requests = request_stash.get_all_for_verify_key( root_verify_key, verify_key=verify_key ) @@ -34,17 +34,17 @@ def test_requeststash_get_all_for_verify_key_no_requests( def test_requeststash_get_all_for_verify_key_success( root_verify_key, request_stash: RequestStash, - guest_domain_client: SyftClient, - authed_context_guest_domain_client: AuthedServiceContext, + guest_datasite_client: SyftClient, + authed_context_guest_datasite_client: AuthedServiceContext, ) -> None: # test when there is one request submit_request: SubmitRequest = SubmitRequest(changes=[]) stash_set_result = request_stash.set( root_verify_key, - submit_request.to(Request, context=authed_context_guest_domain_client), + submit_request.to(Request, context=authed_context_guest_datasite_client), ) - verify_key: SyftVerifyKey = guest_domain_client.credentials.verify_key + verify_key: SyftVerifyKey = guest_datasite_client.credentials.verify_key requests = request_stash.get_all_for_verify_key( credentials=root_verify_key, verify_key=verify_key, @@ -58,7 +58,7 @@ def test_requeststash_get_all_for_verify_key_success( submit_request_2: SubmitRequest = SubmitRequest(changes=[]) stash_set_result_2 = request_stash.set( root_verify_key, - submit_request_2.to(Request, context=authed_context_guest_domain_client), + submit_request_2.to(Request, context=authed_context_guest_datasite_client), ) requests = request_stash.get_all_for_verify_key( @@ -80,9 +80,9 @@ def test_requeststash_get_all_for_verify_key_fail( root_verify_key, request_stash: RequestStash, monkeypatch: MonkeyPatch, - guest_domain_client: SyftClient, + guest_datasite_client: SyftClient, ) -> None: - verify_key: SyftVerifyKey = guest_domain_client.credentials.verify_key + verify_key: SyftVerifyKey = guest_datasite_client.credentials.verify_key mock_error_message = ( "verify key not in the document store's unique or searchable keys" ) @@ -104,9 +104,9 @@ def test_requeststash_get_all_for_verify_key_find_index_fail( root_verify_key, request_stash: RequestStash, monkeypatch: MonkeyPatch, - guest_domain_client: SyftClient, + guest_datasite_client: SyftClient, ) -> None: - verify_key: SyftVerifyKey = guest_domain_client.credentials.verify_key + verify_key: SyftVerifyKey = guest_datasite_client.credentials.verify_key qks = QueryKeys(qks=[RequestingUserVerifyKeyPartitionKey.with_obj(verify_key)]) mock_error_message = f"Failed to query index or search with {qks.all[0]}" diff --git a/packages/syft/tests/syft/serializable_test.py b/packages/syft/tests/syft/serializable_test.py index 6f84f7afde1..154589c2190 100644 --- a/packages/syft/tests/syft/serializable_test.py +++ b/packages/syft/tests/syft/serializable_test.py @@ -21,7 +21,11 @@ class AbstractBase: uid: str -@serializable(attrs=["uid", "value"]) +@serializable( + attrs=["uid", "value"], + canonical_name="Base", + version=1, +) class Base(AbstractBase): """Serialize: uid, value""" @@ -32,7 +36,11 @@ def __init__(self, uid: str, value: int): self.value = value -@serializable(attrs=["status"]) +@serializable( + attrs=["status"], + canonical_name="Derived", + version=1, +) class Derived(Base): """Serialize: uid, value, status""" @@ -43,7 +51,12 @@ def __init__(self, uid: str, value: int, status: int) -> None: self.status = status -@serializable(attrs=["status"], without=["uid"]) +@serializable( + attrs=["status"], + without=["uid"], + canonical_name="DerivedWithoutAttrs", + version=1, +) class DerivedWithoutAttrs(Base): """Serialize: value, status""" @@ -54,7 +67,12 @@ def __init__(self, 
uid: str, value: int, status: int) -> None: self.status = status -@serializable(attrs=["status"], inherit=False) +@serializable( + attrs=["status"], + inherit=False, + canonical_name="DerivedNoInherit", + version=1, +) class DerivedNoInherit(Base): """Serialize: status""" @@ -65,7 +83,12 @@ def __init__(self, uid: str, value: int, status: int) -> None: self.status = status -@serializable(attrs=["uid", "value"], inheritable=False) +@serializable( + attrs=["uid", "value"], + inheritable=False, + canonical_name="BaseAttrsNonInheritable", + version=1, +) class BaseAttrsNonInheritable(AbstractBase): """Serialize: uid, value (Derived cannot inherit base attrs)""" @@ -76,7 +99,11 @@ def __init__(self, uid: str = None, value: int = None): self.value = value -@serializable(attrs=["status"]) +@serializable( + attrs=["status"], + canonical_name="DerivedWithoutBaseAttrs", + version=1, +) class DerivedWithoutBaseAttrs(BaseAttrsNonInheritable): """Serialize: status (Dervied cannot inherit base attrs)""" @@ -168,7 +195,10 @@ def test_derived_without_base_attrs(): # ------------------------------ Pydantic classes ------------------------------ -@serializable() +@serializable( + canonical_name="PydBase", + version=1, +) class PydBase(BaseModel): """Serialize: uid, value, flag""" @@ -177,7 +207,10 @@ class PydBase(BaseModel): flag: bool | None = None -@serializable() +@serializable( + canonical_name="PydDerived", + version=1, +) class PydDerived(PydBase): """Serialize: uid, value, flag, source, target""" @@ -185,7 +218,11 @@ class PydDerived(PydBase): target: str -@serializable(without=["uid"]) +@serializable( + without=["uid"], + canonical_name="PydDerivedWithoutAttr", + version=1, +) class PydDerivedWithoutAttr(PydBase): """ Serialize: value, flag, source, target @@ -196,7 +233,11 @@ class PydDerivedWithoutAttr(PydBase): target: str -@serializable(without=["uid", "flag", "config"]) +@serializable( + without=["uid", "flag", "config"], + canonical_name="PydDerivedWithoutAttrs", + version=1, +) class PydDerivedWithoutAttrs(PydBase): """ Serialize: value, source, target @@ -208,7 +249,11 @@ class PydDerivedWithoutAttrs(PydBase): config: dict | None = None -@serializable(attrs=["source", "target"]) +@serializable( + attrs=["source", "target"], + canonical_name="PydDerivedOnly", + version=1, +) class PydDerivedOnly(PydBase): """ Serialize: source, target diff --git a/packages/syft/tests/syft/server_url_test.py b/packages/syft/tests/syft/server_url_test.py new file mode 100644 index 00000000000..c23670f9f12 --- /dev/null +++ b/packages/syft/tests/syft/server_url_test.py @@ -0,0 +1,27 @@ +# third party +import pytest + +# syft absolute +from syft.types.server_url import ServerURL + +test_suite = [ + ("http://0.0.0.0", 8081, "http://0.0.0.0:8081"), + ("http://0.0.0.0", None, "http://0.0.0.0:80"), + (None, None, "http://localhost:80"), + ("http://0.0.0.0:8081", 8082, "http://0.0.0.0:8081"), + ("0.0.0.0:8081", None, "http://0.0.0.0:8081"), + ("example.com", None, "http://example.com:80"), + ("https://example.com", None, "https://example.com:80"), +] + + +@pytest.mark.parametrize("url, port, ground_truth", test_suite) +def test_server_url(url, port, ground_truth) -> None: + if not url and not port: + assert ServerURL().base_url == ground_truth + elif not url: + assert ServerURL(port=port).base_url == ground_truth + elif not port: + assert ServerURL(host_or_ip=url).base_url == ground_truth + else: + assert ServerURL(host_or_ip=url, port=port).base_url == ground_truth diff --git 
a/packages/syft/tests/syft/service/action/action_object_test.py b/packages/syft/tests/syft/service/action/action_object_test.py index dd7351f78e4..04031d64c25 100644 --- a/packages/syft/tests/syft/service/action/action_object_test.py +++ b/packages/syft/tests/syft/service/action/action_object_test.py @@ -20,9 +20,12 @@ from syft.service.action.action_object import HOOK_ON_POINTERS from syft.service.action.action_object import PreHookContext from syft.service.action.action_object import make_action_side_effect -from syft.service.action.action_object import propagate_node_uid +from syft.service.action.action_object import propagate_server_uid from syft.service.action.action_object import send_action_side_effect from syft.service.action.action_types import action_type_for_type +from syft.service.response import SyftError +from syft.service.response import SyftSuccess +from syft.store.blob_storage import SyftObjectRetrieval from syft.types.uid import LineageID from syft.types.uid import UID @@ -32,9 +35,9 @@ def helper_make_action_obj(orig_obj: Any): def helper_make_action_pointers(worker, obj, *args, **kwargs): - root_domain_client = worker.root_client - res = obj.send(root_domain_client) - obj_pointer = root_domain_client.api.services.action.get_pointer(res.id) + root_datasite_client = worker.root_client + res = obj.send(root_datasite_client) + obj_pointer = root_datasite_client.api.services.action.get_pointer(res.id) # The args and kwargs should automatically be pointerized by obj_pointer return obj_pointer, args, kwargs @@ -172,7 +175,7 @@ def test_actionobject_add_pre_hooks(): assert make_action_side_effect in obj.syft_pre_hooks__[HOOK_ALWAYS] assert send_action_side_effect not in obj.syft_pre_hooks__[HOOK_ON_POINTERS] - assert propagate_node_uid not in obj.syft_post_hooks__[HOOK_ALWAYS] + assert propagate_server_uid not in obj.syft_post_hooks__[HOOK_ALWAYS] # eager exec tests: obj._syft_add_pre_hooks__(eager_execution=True) @@ -180,7 +183,7 @@ def test_actionobject_add_pre_hooks(): assert make_action_side_effect in obj.syft_pre_hooks__[HOOK_ALWAYS] assert send_action_side_effect in obj.syft_pre_hooks__[HOOK_ON_POINTERS] - assert propagate_node_uid in obj.syft_post_hooks__[HOOK_ALWAYS] + assert propagate_server_uid in obj.syft_post_hooks__[HOOK_ALWAYS] @pytest.mark.parametrize( @@ -251,12 +254,12 @@ def test_actionobject_hooks_send_action_side_effect_err_invalid_args(worker): ], ) def test_actionobject_hooks_send_action_side_effect_ignore_op( - root_domain_client, orig_obj_op + root_datasite_client, orig_obj_op ): orig_obj, op, args, kwargs = orig_obj_op obj = helper_make_action_obj(orig_obj) - obj = obj.send(root_domain_client) + obj = obj.send(root_datasite_client) context = PreHookContext(obj=obj, op_name=op) result = send_action_side_effect(context, *args, **kwargs) @@ -299,18 +302,18 @@ def test_actionobject_hooks_send_action_side_effect_ok(worker, orig_obj_op): assert context.result_id is not None -def test_actionobject_hooks_propagate_node_uid_err(): +def test_actionobject_hooks_propagate_server_uid_err(): orig_obj = "abc" op = "capitalize" obj = ActionObject.from_obj(orig_obj) context = PreHookContext(obj=obj, op_name=op) - result = propagate_node_uid(context, op=op, result="orig_obj") + result = propagate_server_uid(context, op=op, result="orig_obj") assert result.is_err() -def test_actionobject_hooks_propagate_node_uid_ok(): +def test_actionobject_hooks_propagate_server_uid_ok(): orig_obj = "abc" op = "capitalize" @@ -320,7 +323,7 @@ def 
test_actionobject_hooks_propagate_node_uid_ok(): obj.syft_point_to(obj_id) context = PreHookContext(obj=obj, op_name=op) - result = propagate_node_uid(context, op=op, result="orig_obj") + result = propagate_server_uid(context, op=op, result="orig_obj") assert result.is_ok() @@ -332,7 +335,7 @@ def test_actionobject_syft_point_to(): obj.syft_point_to(obj_id) - assert obj.syft_node_uid == obj_id + assert obj.syft_server_uid == obj_id @pytest.mark.parametrize( @@ -503,8 +506,8 @@ def test_actionobject_syft_get_path(testcase): ], ) def test_actionobject_syft_send_get(worker, testcase): - root_domain_client = worker.root_client - root_domain_client._fetch_api(root_domain_client.credentials) + root_datasite_client = worker.root_client + root_datasite_client._fetch_api(root_datasite_client.credentials) action_store = worker.get_service("actionservice").store orig_obj = testcase @@ -512,7 +515,7 @@ def test_actionobject_syft_send_get(worker, testcase): assert len(action_store.data) == 0 - ptr = obj.send(root_domain_client) + ptr = obj.send(root_datasite_client) assert len(action_store.data) == 1 retrieved = ptr.get() @@ -581,6 +584,7 @@ def test_actionobject_syft_get_attr_context(): (complex(1, 2), "conjugate", [], {}, complex(1, -2)), ], ) +@pytest.mark.skip(reason="Disabled until we bring back eager execution") def test_actionobject_syft_execute_hooks(worker, testcase): client = worker.root_client assert client.settings.enable_eager_execution(enable=True) @@ -601,9 +605,9 @@ def test_actionobject_syft_execute_hooks(worker, testcase): ) assert context.result_id is not None - context.obj.syft_node_uid = UID() + context.obj.syft_server_uid = UID() result = obj_pointer._syft_run_post_hooks__(context, name=op, result=obj_pointer) - assert result.syft_node_uid == context.obj.syft_node_uid + assert result.syft_server_uid == context.obj.syft_server_uid @pytest.mark.parametrize( @@ -656,7 +660,7 @@ def test_actionobject_syft_wrap_attribute_for_properties(orig_obj): assert prop is not None assert isinstance(prop, ActionObject) assert hasattr(prop, "id") - assert hasattr(prop, "syft_node_uid") + assert hasattr(prop, "syft_server_uid") assert hasattr(prop, "syft_history_hash") @@ -1023,3 +1027,35 @@ def test_actionobject_syft_getattr_pandas(worker): obj.columns = ["a", "b", "c"] assert (obj.columns == ["a", "b", "c"]).all() + + +def test_actionobject_delete(worker): + """ + Test deleting action objects and their corresponding blob storage entries + """ + root_client = worker.root_client + + # small object with no blob store entry + data_small = np.random.randint(0, 100, size=3) + action_obj = ActionObject.from_obj(data_small) + action_obj.send(root_client) + assert action_obj.syft_blob_storage_entry_id is None + del_res = root_client.api.services.action.delete(uid=action_obj.id) + assert isinstance(del_res, SyftSuccess) + + # big object with blob store entry + num_elements = 25 * 1024 * 1024 + data_big = np.random.randint(0, 100, size=num_elements) # 4 bytes per int32 + action_obj_2 = ActionObject.from_obj(data_big) + action_obj_2.send(root_client) + assert isinstance(action_obj_2.syft_blob_storage_entry_id, UID) + read_res = root_client.api.services.blob_storage.read( + action_obj_2.syft_blob_storage_entry_id + ) + assert isinstance(read_res, SyftObjectRetrieval) + del_res = root_client.api.services.action.delete(uid=action_obj_2.id) + assert isinstance(del_res, SyftSuccess) + read_res = root_client.api.services.blob_storage.read( + action_obj_2.syft_blob_storage_entry_id + ) + assert isinstance(read_res, 
SyftError) diff --git a/packages/syft/tests/syft/service/action/action_service_test.py b/packages/syft/tests/syft/service/action/action_service_test.py index 0c262f839a0..bb8057d4ee3 100644 --- a/packages/syft/tests/syft/service/action/action_service_test.py +++ b/packages/syft/tests/syft/service/action/action_service_test.py @@ -10,15 +10,17 @@ def get_auth_ctx(worker): - return AuthedServiceContext(node=worker, credentials=worker.signing_key.verify_key) + return AuthedServiceContext( + server=worker, credentials=worker.signing_key.verify_key + ) def test_action_service_sanity(worker): service = worker.get_service("actionservice") - root_domain_client = worker.root_client + root_datasite_client = worker.root_client obj = ActionObject.from_obj("abc") - pointer = obj.send(root_domain_client) + pointer = obj.send(root_datasite_client) assert len(service.store.data) == 1 res = pointer.capitalize() diff --git a/packages/syft/tests/syft/service/dataset/dataset_service_test.py b/packages/syft/tests/syft/service/dataset/dataset_service_test.py index 1aa85c07865..5fcc2a29fa1 100644 --- a/packages/syft/tests/syft/service/dataset/dataset_service_test.py +++ b/packages/syft/tests/syft/service/dataset/dataset_service_test.py @@ -12,15 +12,16 @@ # syft absolute import syft as sy -from syft.node.worker import Worker +from syft.server.worker import Worker from syft.service.action.action_object import ActionObject +from syft.service.action.action_object import TwinMode +from syft.service.blob_storage.util import can_upload_to_blob_storage from syft.service.dataset.dataset import CreateAsset as Asset from syft.service.dataset.dataset import CreateDataset as Dataset from syft.service.dataset.dataset import _ASSET_WITH_NONE_MOCK_ERROR_MESSAGE from syft.service.response import SyftError from syft.service.response import SyftException from syft.service.response import SyftSuccess -from syft.types.twin_object import TwinMode def random_hash() -> str: @@ -197,11 +198,11 @@ def test_guest_client_get_empty_mock_as_private_pointer( asset = Asset(**asset_with_empty_mock) dataset = Dataset(name=random_hash(), asset_list=[asset]) - root_domain_client = worker.root_client - root_domain_client.upload_dataset(dataset) + root_datasite_client = worker.root_client + root_datasite_client.upload_dataset(dataset) - guest_domain_client = root_domain_client.guest() - guest_datasets = guest_domain_client.api.services.dataset.get_all() + guest_datasite_client = root_datasite_client.guest() + guest_datasets = guest_datasite_client.api.services.dataset.get_all() guest_dataset = guest_datasets[0] mock = guest_dataset.assets[0].pointer @@ -211,16 +212,16 @@ def test_guest_client_get_empty_mock_as_private_pointer( assert mock.syft_twin_type is TwinMode.MOCK -def test_domain_client_cannot_upload_dataset_with_non_mock(worker: Worker) -> None: +def test_datasite_client_cannot_upload_dataset_with_non_mock(worker: Worker) -> None: assets = [Asset(**make_asset_with_mock()) for _ in range(10)] dataset = Dataset(name=random_hash(), asset_list=assets) dataset.asset_list[0].mock = None - root_domain_client = worker.root_client + root_datasite_client = worker.root_client with pytest.raises(ValueError) as excinfo: - root_domain_client.upload_dataset(dataset) + root_datasite_client.upload_dataset(dataset) assert _ASSET_WITH_NONE_MOCK_ERROR_MESSAGE in str(excinfo.value) @@ -276,13 +277,9 @@ def different_data_types( def test_upload_dataset_with_assets_of_different_data_types( worker: Worker, - different_data_types: int - | str - | dict - | set - | 
np.ndarray - | pd.DataFrame - | torch.Tensor, + different_data_types: ( + int | str | dict | set | np.ndarray | pd.DataFrame | torch.Tensor + ), ) -> None: asset = sy.Asset( name=random_hash(), @@ -291,13 +288,86 @@ def test_upload_dataset_with_assets_of_different_data_types( ) dataset = Dataset(name=random_hash()) dataset.add_asset(asset) - root_domain_client = worker.root_client - res = root_domain_client.upload_dataset(dataset) + root_datasite_client = worker.root_client + res = root_datasite_client.upload_dataset(dataset) assert isinstance(res, SyftSuccess) - assert len(root_domain_client.api.services.dataset.get_all()) == 1 - assert type(root_domain_client.datasets[0].assets[0].data) == type( + assert len(root_datasite_client.api.services.dataset.get_all()) == 1 + assert type(root_datasite_client.datasets[0].assets[0].data) == type( different_data_types ) - assert type(root_domain_client.datasets[0].assets[0].mock) == type( + assert type(root_datasite_client.datasets[0].assets[0].mock) == type( different_data_types ) + + +def test_delete_small_datasets(worker: Worker, small_dataset: Dataset) -> None: + root_client = worker.root_client + assert not can_upload_to_blob_storage(small_dataset, root_client.metadata) + upload_res = root_client.upload_dataset(small_dataset) + assert isinstance(upload_res, SyftSuccess) + + dataset = root_client.api.services.dataset.get_all()[0] + asset = dataset.asset_list[0] + assert isinstance(asset.data, np.ndarray) + assert isinstance(asset.mock, np.ndarray) + + # delete the dataset without deleting its assets + del_res = root_client.api.services.dataset.delete( + uid=dataset.id, delete_assets=False + ) + assert isinstance(del_res, SyftSuccess) + assert isinstance(asset.data, np.ndarray) + assert isinstance(asset.mock, np.ndarray) + assert len(root_client.api.services.dataset.get_all()) == 0 + # we can still get back the deleted dataset by uid + deleted_dataset = root_client.api.services.dataset.get_by_id(uid=dataset.id) + assert deleted_dataset.name == f"_deleted_{dataset.name}_{dataset.id}" + assert deleted_dataset.to_be_deleted + + # delete the dataset and its assets + del_res = root_client.api.services.dataset.delete( + uid=dataset.id, delete_assets=True + ) + assert isinstance(del_res, SyftSuccess) + assert asset.data is None + assert isinstance(asset.mock, SyftError) + assert len(root_client.api.services.dataset.get_all()) == 0 + + +def test_delete_big_datasets(worker: Worker, big_dataset: Dataset) -> None: + root_client = worker.root_client + assert can_upload_to_blob_storage(big_dataset, root_client.metadata) + upload_res = root_client.upload_dataset(big_dataset) + assert isinstance(upload_res, SyftSuccess) + + dataset = root_client.api.services.dataset.get_all()[0] + asset = dataset.asset_list[0] + assert isinstance(asset.data, np.ndarray) + assert isinstance(asset.mock, np.ndarray) + # test that the data is saved in the blob storage + assert len(root_client.api.services.blob_storage.get_all()) == 2 + + # delete the dataset without deleting its assets + del_res = root_client.api.services.dataset.delete( + uid=dataset.id, delete_assets=False + ) + assert isinstance(del_res, SyftSuccess) + assert isinstance(asset.data, np.ndarray) + assert isinstance(asset.mock, np.ndarray) + assert len(root_client.api.services.dataset.get_all()) == 0 + # we can still get back the deleted dataset by uid + deleted_dataset = root_client.api.services.dataset.get_by_id(uid=dataset.id) + assert deleted_dataset.name == f"_deleted_{dataset.name}_{dataset.id}" + assert 
deleted_dataset.to_be_deleted + # the dataset's blob entries are still there + assert len(root_client.api.services.blob_storage.get_all()) == 2 + + # delete the dataset + del_res = root_client.api.services.dataset.delete( + uid=dataset.id, delete_assets=True + ) + assert isinstance(del_res, SyftSuccess) + assert asset.data is None + assert isinstance(asset.mock, SyftError) + assert len(root_client.api.services.blob_storage.get_all()) == 0 + assert len(root_client.api.services.dataset.get_all()) == 0 diff --git a/packages/syft/tests/syft/service/jobs/job_stash_test.py b/packages/syft/tests/syft/service/jobs/job_stash_test.py index 2c026fbed76..fc730a78486 100644 --- a/packages/syft/tests/syft/service/jobs/job_stash_test.py +++ b/packages/syft/tests/syft/service/jobs/job_stash_test.py @@ -32,7 +32,7 @@ def test_eta_string(current_iter, n_iters, status, creation_time_delta, expected): job = Job( id=UID(), - node_uid=UID(), + server_uid=UID(), n_iters=n_iters, current_iter=current_iter, creation_time=(datetime.now(tz=timezone.utc) - creation_time_delta).isoformat(), diff --git a/packages/syft/tests/syft/service/sync/sync_resolve_single_test.py b/packages/syft/tests/syft/service/sync/sync_resolve_single_test.py index 83fba7fd168..8aa8103ef0f 100644 --- a/packages/syft/tests/syft/service/sync/sync_resolve_single_test.py +++ b/packages/syft/tests/syft/service/sync/sync_resolve_single_test.py @@ -6,10 +6,11 @@ # syft absolute import syft import syft as sy -from syft.client.domain_client import DomainClient +from syft.client.datasite_client import DatasiteClient from syft.client.sync_decision import SyncDecision from syft.client.syncing import compare_clients from syft.client.syncing import resolve +from syft.server.worker import Worker from syft.service.job.job_stash import Job from syft.service.request.request import RequestStatus from syft.service.response import SyftError @@ -34,8 +35,8 @@ def handle_decision( def compare_and_resolve( *, - from_client: DomainClient, - to_client: DomainClient, + from_client: DatasiteClient, + to_client: DatasiteClient, decision: SyncDecision = SyncDecision.LOW, decision_callback: callable = None, share_private_data: bool = True, @@ -90,7 +91,7 @@ def compute() -> int: return 42 -def get_ds_client(client: DomainClient) -> DomainClient: +def get_ds_client(client: DatasiteClient) -> DatasiteClient: client.register( name="a", email="a@a.com", @@ -101,9 +102,9 @@ def get_ds_client(client: DomainClient) -> DomainClient: def test_diff_state(low_worker, high_worker): - low_client: DomainClient = low_worker.root_client + low_client: DatasiteClient = low_worker.root_client client_low_ds = get_ds_client(low_client) - high_client: DomainClient = high_worker.root_client + high_client: DatasiteClient = high_worker.root_client @sy.syft_function_single_use() def compute() -> int: @@ -132,16 +133,15 @@ def compute() -> int: client_low_ds.refresh() res = client_low_ds.code.compute(blocking=True) - assert res == compute(syft_no_node=True) + assert res == compute(syft_no_server=True) -def test_diff_state_with_dataset(low_worker, high_worker): - low_client: DomainClient = low_worker.root_client +def test_diff_state_with_dataset(low_worker: Worker, high_worker: Worker): + low_client: DatasiteClient = low_worker.root_client client_low_ds = get_ds_client(low_client) - high_client: DomainClient = high_worker.root_client + high_client: DatasiteClient = high_worker.root_client _ = create_dataset(high_client) - _ = create_dataset(low_client) @sy.syft_function_single_use() def 
compute_mean(data) -> int: @@ -150,7 +150,9 @@ def compute_mean(data) -> int: _ = client_low_ds.code.request_code_execution(compute_mean) result = client_low_ds.code.compute_mean(blocking=False) - assert isinstance(result, SyftError), "DS cannot start a job on low side" + assert isinstance( + result, SyftError + ), "DS cannot start a job on low side since the data was not passed as an argument" diff_state_before, diff_state_after = compare_and_resolve( from_client=low_client, to_client=high_client @@ -162,8 +164,14 @@ def compute_mean(data) -> int: # run_and_deposit_result(high_client) data_high = high_client.datasets[0].assets[0] - result = high_client.code.compute_mean(data=data_high, blocking=True) - high_client.requests[0].deposit_result(result) + mean_result = high_client.code.compute_mean(data=data_high, blocking=True) + high_client.requests[0].deposit_result(mean_result) + + # the high side client deletes the dataset after depositing the result + dataset_del_res = high_client.api.services.dataset.delete( + uid=high_client.datasets[0].id + ) + assert isinstance(dataset_del_res, SyftSuccess) diff_state_before, diff_state_after = compare_and_resolve( from_client=high_client, to_client=low_client @@ -181,19 +189,15 @@ def compute_mean(data) -> int: res_blocking = client_low_ds.code.compute_mean(blocking=True) res_non_blocking = client_low_ds.code.compute_mean(blocking=False).wait() - # expected_result = compute_mean(syft_no_node=True, data=) - assert ( - res_blocking - == res_non_blocking - == high_client.datasets[0].assets[0].data.mean() - ) + # expected_result = compute_mean(syft_no_server=True, data=) + assert res_blocking == res_non_blocking == mean_result def test_sync_with_error(low_worker, high_worker): """Check syncing with an error in a syft function""" - low_client: DomainClient = low_worker.root_client + low_client: DatasiteClient = low_worker.root_client client_low_ds = get_ds_client(low_client) - high_client: DomainClient = high_worker.root_client + high_client: DatasiteClient = high_worker.root_client @sy.syft_function_single_use() def compute() -> int: @@ -224,9 +228,9 @@ def compute() -> int: def test_ignore_unignore_single(low_worker, high_worker): - low_client: DomainClient = low_worker.root_client + low_client: DatasiteClient = low_worker.root_client client_low_ds = get_ds_client(low_client) - high_client: DomainClient = high_worker.root_client + high_client: DatasiteClient = high_worker.root_client @sy.syft_function_single_use() def compute() -> int: diff --git a/packages/syft/tests/syft/service_permission_test.py b/packages/syft/tests/syft/service_permission_test.py index edb66dd9f96..a0476d1ff9d 100644 --- a/packages/syft/tests/syft/service_permission_test.py +++ b/packages/syft/tests/syft/service_permission_test.py @@ -19,36 +19,36 @@ def guest_mock_user(root_verify_key, user_stash, guest_user): def test_call_service_syftapi_with_permission(worker, guest_mock_user, update_user): user_id = guest_mock_user.id - res = worker.root_client.api.services.user.update(user_id, update_user) + res = worker.root_client.api.services.user.update(uid=user_id, **update_user) assert res # this throws an AttributeError, maybe we want something more clear?
-def test_call_service_syftapi_no_permission(guest_domain_client): +def test_call_service_syftapi_no_permission(guest_datasite_client): with pytest.raises(AttributeError): - guest_domain_client.api.services.user.get_all() + guest_datasite_client.api.services.user.get_all() def test_directly_call_service_with_permission(worker, guest_mock_user, update_user): - root_domain_client = worker.root_client + root_datasite_client = worker.root_client user_id = guest_mock_user.id api_call = SyftAPICall( - node_uid=root_domain_client.id, + server_uid=root_datasite_client.id, path="user.update", - args=[user_id, update_user], - kwargs={}, + args=[], + kwargs={"uid": user_id, **update_user}, ) - signed_call = api_call.sign(root_domain_client.api.signing_key) - signed_result = root_domain_client.api.connection.make_call(signed_call) + signed_call = api_call.sign(root_datasite_client.api.signing_key) + signed_result = root_datasite_client.api.connection.make_call(signed_call) result = signed_result.message.data assert result -def test_directly_call_service_no_permission(guest_domain_client): +def test_directly_call_service_no_permission(guest_datasite_client): api_call = SyftAPICall( - node_uid=guest_domain_client.id, path="user.get_all", args=[], kwargs={} + server_uid=guest_datasite_client.id, path="user.get_all", args=[], kwargs={} ) - signed_call = api_call.sign(guest_domain_client.api.signing_key) - signed_result = guest_domain_client.api.connection.make_call(signed_call) + signed_call = api_call.sign(guest_datasite_client.api.signing_key) + signed_result = guest_datasite_client.api.connection.make_call(signed_call) result = signed_result.message.data assert isinstance(result, SyftError) diff --git a/packages/syft/tests/syft/settings/fixtures.py b/packages/syft/tests/syft/settings/fixtures.py index 65cbf18deca..9cdfae9baaa 100644 --- a/packages/syft/tests/syft/settings/fixtures.py +++ b/packages/syft/tests/syft/settings/fixtures.py @@ -6,12 +6,12 @@ # syft absolute from syft.__init__ import __version__ -from syft.abstract_node import NodeSideType -from syft.abstract_node import NodeType -from syft.node.credentials import SyftSigningKey -from syft.service.metadata.node_metadata import NodeMetadataJSON -from syft.service.settings.settings import NodeSettings -from syft.service.settings.settings import NodeSettingsUpdate +from syft.abstract_server import ServerSideType +from syft.abstract_server import ServerType +from syft.server.credentials import SyftSigningKey +from syft.service.metadata.server_metadata import ServerMetadataJSON +from syft.service.settings.settings import ServerSettings +from syft.service.settings.settings import ServerSettingsUpdate from syft.service.settings.settings_service import SettingsService from syft.service.settings.settings_stash import SettingsStash from syft.types.syft_object import HIGHEST_SYFT_OBJECT_VERSION @@ -25,8 +25,8 @@ def settings_stash(document_store) -> SettingsStash: @pytest.fixture -def settings(worker, faker) -> NodeSettings: - yield NodeSettings( +def settings(worker, faker) -> ServerSettings: + yield ServerSettings( id=UID(), name=worker.name, organization=faker.text(), @@ -35,18 +35,18 @@ def settings(worker, faker) -> NodeSettings: deployed_on=datetime.now().date().strftime("%m/%d/%Y"), signup_enabled=False, admin_email="info@openmined.org", - node_side_type=NodeSideType.LOW_SIDE, + server_side_type=ServerSideType.LOW_SIDE, show_warnings=False, verify_key=SyftSigningKey.generate().verify_key, - node_type=NodeType.DOMAIN, + 
server_type=ServerType.DATASITE, association_request_auto_approval=False, default_worker_pool="default-pool", ) @pytest.fixture -def update_settings(faker) -> NodeSettingsUpdate: - yield NodeSettingsUpdate( +def update_settings(faker) -> ServerSettingsUpdate: + yield ServerSettingsUpdate( name=faker.name(), description=faker.text(), on_board=faker.boolean(), @@ -54,8 +54,8 @@ def update_settings(faker) -> NodeSettingsUpdate: @pytest.fixture -def metadata_json(faker) -> NodeMetadataJSON: - yield NodeMetadataJSON( +def metadata_json(faker) -> ServerMetadataJSON: + yield ServerMetadataJSON( metadata_version=faker.random_int(), name=faker.name(), id=faker.text(), @@ -63,9 +63,9 @@ def metadata_json(faker) -> NodeMetadataJSON: highest_object_version=HIGHEST_SYFT_OBJECT_VERSION, lowest_object_version=LOWEST_SYFT_OBJECT_VERSION, syft_version=__version__, - node_side_type=NodeSideType.LOW_SIDE.value, + server_side_type=ServerSideType.LOW_SIDE.value, show_warnings=False, - node_type=NodeType.DOMAIN.value, + server_type=ServerType.DATASITE.value, min_size_blob_storage_mb=16, ) diff --git a/packages/syft/tests/syft/settings/metadata_test.py b/packages/syft/tests/syft/settings/metadata_test.py index 9d28d275a27..6dded7e48eb 100644 --- a/packages/syft/tests/syft/settings/metadata_test.py +++ b/packages/syft/tests/syft/settings/metadata_test.py @@ -3,7 +3,7 @@ # syft absolute from syft.__init__ import __version__ -from syft.service.metadata.node_metadata import check_version +from syft.service.metadata.server_metadata import check_version def test_check_base_version_success() -> None: diff --git a/packages/syft/tests/syft/settings/settings_serde_test.py b/packages/syft/tests/syft/settings/settings_serde_test.py index 1a41c927fbc..34a4d22c198 100644 --- a/packages/syft/tests/syft/settings/settings_serde_test.py +++ b/packages/syft/tests/syft/settings/settings_serde_test.py @@ -17,7 +17,7 @@ "metadata_json", ], ) -def test_node_settings_serde(obj: Any, request: FixtureRequest) -> None: +def test_server_settings_serde(obj: Any, request: FixtureRequest) -> None: requested_obj = request.getfixturevalue(obj) ser_data = sy.serialize(requested_obj, to_bytes=True) assert isinstance(ser_data, bytes) diff --git a/packages/syft/tests/syft/settings/settings_service_test.py b/packages/syft/tests/syft/settings/settings_service_test.py index 56bf414f373..286c92ba66f 100644 --- a/packages/syft/tests/syft/settings/settings_service_test.py +++ b/packages/syft/tests/syft/settings/settings_service_test.py @@ -11,14 +11,14 @@ # syft absolute import syft -from syft.abstract_node import NodeSideType -from syft.node.credentials import SyftSigningKey -from syft.node.credentials import SyftVerifyKey +from syft.abstract_server import ServerSideType +from syft.server.credentials import SyftSigningKey +from syft.server.credentials import SyftVerifyKey from syft.service.context import AuthedServiceContext from syft.service.response import SyftError from syft.service.response import SyftSuccess -from syft.service.settings.settings import NodeSettings -from syft.service.settings.settings import NodeSettingsUpdate +from syft.service.settings.settings import ServerSettings +from syft.service.settings.settings import ServerSettingsUpdate from syft.service.settings.settings_service import SettingsService from syft.service.settings.settings_stash import SettingsStash from syft.service.user.user import UserCreate @@ -28,7 +28,7 @@ def test_settingsservice_get_success( monkeypatch: MonkeyPatch, settings_service: SettingsService, - settings: 
NodeSettings, + settings: ServerSettings, authed_context: AuthedServiceContext, ) -> None: mock_stash_get_all_output = [settings, settings] @@ -41,7 +41,7 @@ def mock_stash_get_all(credentials) -> Ok: response = settings_service.get(context=authed_context) - assert isinstance(response.ok(), NodeSettings) + assert isinstance(response.ok(), ServerSettings) assert response == expected_output @@ -75,20 +75,20 @@ def mock_stash_get_all_error(credentials) -> Err: def test_settingsservice_set_success( settings_service: SettingsService, - settings: NodeSettings, + settings: ServerSettings, authed_context: AuthedServiceContext, ) -> None: response = settings_service.set(authed_context, settings) assert response.is_ok() is True - assert isinstance(response.ok(), NodeSettings) + assert isinstance(response.ok(), ServerSettings) assert response.ok() == settings def test_settingsservice_set_fail( monkeypatch: MonkeyPatch, settings_service: SettingsService, - settings: NodeSettings, + settings: ServerSettings, authed_context: AuthedServiceContext, ) -> None: mock_error_message = "database failure" @@ -107,8 +107,8 @@ def mock_stash_set_error(credentials, a) -> Err: def add_mock_settings( root_verify_key: SyftVerifyKey, settings_stash: SettingsStash, - settings: NodeSettings, -) -> NodeSettings: + settings: ServerSettings, +) -> ServerSettings: # create a mock settings in the stash so that we can update it result = settings_stash.partition.set(root_verify_key, settings) assert result.is_ok() @@ -124,8 +124,8 @@ def test_settingsservice_update_success( monkeypatch: MonkeyPatch, settings_stash: SettingsStash, settings_service: SettingsService, - settings: NodeSettings, - update_settings: NodeSettingsUpdate, + settings: ServerSettings, + update_settings: ServerSettingsUpdate, authed_context: AuthedServiceContext, ) -> None: # add a mock settings to the stash @@ -164,7 +164,7 @@ def mock_stash_get_all(root_verify_key) -> Ok: def test_settingsservice_update_stash_get_all_fail( monkeypatch: MonkeyPatch, settings_service: SettingsService, - update_settings: NodeSettingsUpdate, + update_settings: ServerSettingsUpdate, authed_context: AuthedServiceContext, ) -> None: # the stash.get_all() function fails @@ -182,7 +182,7 @@ def mock_stash_get_all_error(credentials) -> Err: def test_settingsservice_update_stash_empty( settings_service: SettingsService, - update_settings: NodeSettingsUpdate, + update_settings: ServerSettingsUpdate, authed_context: AuthedServiceContext, ) -> None: response = settings_service.update(context=authed_context, settings=update_settings) @@ -193,9 +193,9 @@ def test_settingsservice_update_stash_empty( def test_settingsservice_update_fail( monkeypatch: MonkeyPatch, - settings: NodeSettings, + settings: ServerSettings, settings_service: SettingsService, - update_settings: NodeSettingsUpdate, + update_settings: ServerSettingsUpdate, authed_context: AuthedServiceContext, ) -> None: # the stash has a settings but we could not update it (the stash.update() function fails) @@ -207,9 +207,9 @@ def mock_stash_get_all(credentials) -> Ok: monkeypatch.setattr(settings_service.stash, "get_all", mock_stash_get_all) - mock_update_error_message = "Failed to update obj NodeMetadata" + mock_update_error_message = "Failed to update obj ServerMetadata" - def mock_stash_update_error(credentials, update_settings: NodeSettings) -> Err: + def mock_stash_update_error(credentials, update_settings: ServerSettings) -> Err: return Err(mock_update_error_message) monkeypatch.setattr(settings_service.stash, "update", 
mock_stash_update_error) @@ -226,7 +226,7 @@ def test_settings_allow_guest_registration( # Create a new worker verify_key = SyftSigningKey.generate().verify_key - mock_node_settings = NodeSettings( + mock_server_settings = ServerSettings( name=faker.name(), verify_key=verify_key, highest_version=1, @@ -234,7 +234,7 @@ def test_settings_allow_guest_registration( syft_version=syft.__version__, signup_enabled=False, admin_email="info@openmined.org", - node_side_type=NodeSideType.LOW_SIDE, + server_side_type=ServerSideType.LOW_SIDE, show_warnings=False, deployed_on=datetime.now().date().strftime("%m/%d/%Y"), association_request_auto_approval=False, @@ -243,22 +243,22 @@ def test_settings_allow_guest_registration( with mock.patch( "syft.Worker.settings", new_callable=mock.PropertyMock, - return_value=mock_node_settings, + return_value=mock_server_settings, ): worker = syft.Worker.named(name=faker.name(), reset=True) - guest_domain_client = worker.guest_client - root_domain_client = worker.root_client + guest_datasite_client = worker.guest_client + root_datasite_client = worker.root_client email1 = faker.email() email2 = faker.email() - response_1 = root_domain_client.register( + response_1 = root_datasite_client.register( email=email1, password="joker123", password_verify="joker123", name="Joker" ) assert isinstance(response_1, SyftSuccess) # by default, the guest client can't register new user - response_2 = guest_domain_client.register( + response_2 = guest_datasite_client.register( email=email2, password="harley123", password_verify="harley123", @@ -266,21 +266,21 @@ def test_settings_allow_guest_registration( ) assert isinstance(response_2, SyftError) - assert any(user.email == email1 for user in root_domain_client.users) + assert any(user.email == email1 for user in root_datasite_client.users) # only after the root client enable other users to signup, they can - mock_node_settings.signup_enabled = True + mock_server_settings.signup_enabled = True with mock.patch( "syft.Worker.settings", new_callable=mock.PropertyMock, - return_value=mock_node_settings, + return_value=mock_server_settings, ): worker = syft.Worker.named(name=faker.name(), reset=True) - guest_domain_client = worker.guest_client - root_domain_client = worker.root_client + guest_datasite_client = worker.guest_client + root_datasite_client = worker.root_client password = faker.email() - response_3 = guest_domain_client.register( + response_3 = guest_datasite_client.register( email=email2, password=password, password_verify=password, @@ -288,7 +288,7 @@ def test_settings_allow_guest_registration( ) assert isinstance(response_3, SyftSuccess) - assert any(user.email == email2 for user in root_domain_client.users) + assert any(user.email == email2 for user in root_datasite_client.users) def test_user_register_for_role(monkeypatch: MonkeyPatch, faker: Faker): @@ -302,7 +302,7 @@ def get_mock_client(faker, root_client, role): password="password", password_verify="password", ) - result = root_client.users.create(user_create=user_create) + result = root_client.users.create(**user_create) assert not isinstance(result, SyftError) guest_client = root_client.guest() @@ -311,7 +311,7 @@ def get_mock_client(faker, root_client, role): ) verify_key = SyftSigningKey.generate().verify_key - mock_node_settings = NodeSettings( + mock_server_settings = ServerSettings( name=faker.name(), verify_key=verify_key, highest_version=1, @@ -319,7 +319,7 @@ def get_mock_client(faker, root_client, role): syft_version=syft.__version__, signup_enabled=False, 
admin_email="info@openmined.org", - node_side_type=NodeSideType.LOW_SIDE, + server_side_type=ServerSideType.LOW_SIDE, show_warnings=False, deployed_on=datetime.now().date().strftime("%m/%d/%Y"), association_request_auto_approval=False, @@ -328,7 +328,7 @@ def get_mock_client(faker, root_client, role): with mock.patch( "syft.Worker.settings", new_callable=mock.PropertyMock, - return_value=mock_node_settings, + return_value=mock_server_settings, ): worker = syft.Worker.named(name=faker.name(), reset=True) root_client = worker.root_client diff --git a/packages/syft/tests/syft/settings/settings_stash_test.py b/packages/syft/tests/syft/settings/settings_stash_test.py index 68d9ea73f42..3fbbd28f9e9 100644 --- a/packages/syft/tests/syft/settings/settings_stash_test.py +++ b/packages/syft/tests/syft/settings/settings_stash_test.py @@ -1,14 +1,14 @@ # third party # syft absolute -from syft.service.settings.settings import NodeSettings -from syft.service.settings.settings import NodeSettingsUpdate +from syft.service.settings.settings import ServerSettings +from syft.service.settings.settings import ServerSettingsUpdate from syft.service.settings.settings_stash import SettingsStash def add_mock_settings( - root_verify_key, settings_stash: SettingsStash, settings: NodeSettings -) -> NodeSettings: + root_verify_key, settings_stash: SettingsStash, settings: ServerSettings +) -> ServerSettings: # prepare: add mock settings result = settings_stash.partition.set(root_verify_key, settings) assert result.is_ok() @@ -20,13 +20,13 @@ def add_mock_settings( def test_settingsstash_set( - root_verify_key, settings_stash: SettingsStash, settings: NodeSettings + root_verify_key, settings_stash: SettingsStash, settings: ServerSettings ) -> None: result = settings_stash.set(root_verify_key, settings) assert result.is_ok() created_settings = result.ok() - assert isinstance(created_settings, NodeSettings) + assert isinstance(created_settings, ServerSettings) assert created_settings == settings assert settings.id in settings_stash.partition.data @@ -34,8 +34,8 @@ def test_settingsstash_set( def test_settingsstash_update( root_verify_key, settings_stash: SettingsStash, - settings: NodeSettings, - update_settings: NodeSettingsUpdate, + settings: ServerSettings, + update_settings: ServerSettingsUpdate, ) -> None: # prepare: add a mock settings mock_settings = add_mock_settings(root_verify_key, settings_stash, settings) @@ -50,5 +50,5 @@ def test_settingsstash_update( assert result.is_ok() updated_settings = result.ok() - assert isinstance(updated_settings, NodeSettings) + assert isinstance(updated_settings, ServerSettings) assert mock_settings == updated_settings diff --git a/packages/syft/tests/syft/stores/action_store_test.py b/packages/syft/tests/syft/stores/action_store_test.py index 04d693a618a..38c5f66b304 100644 --- a/packages/syft/tests/syft/stores/action_store_test.py +++ b/packages/syft/tests/syft/stores/action_store_test.py @@ -6,7 +6,7 @@ import pytest # syft absolute -from syft.node.credentials import SyftVerifyKey +from syft.server.credentials import SyftVerifyKey from syft.service.action.action_store import ActionObjectEXECUTE from syft.service.action.action_store import ActionObjectOWNER from syft.service.action.action_store import ActionObjectREAD diff --git a/packages/syft/tests/syft/stores/base_stash_test.py b/packages/syft/tests/syft/stores/base_stash_test.py index 2dd5e443080..ba4975305a1 100644 --- a/packages/syft/tests/syft/stores/base_stash_test.py +++ 
b/packages/syft/tests/syft/stores/base_stash_test.py @@ -27,6 +27,7 @@ @serializable() class MockObject(SyftObject): __canonical_name__ = "base_stash_mock_object_type" + __version__ = 1 id: UID name: str desc: str diff --git a/packages/syft/tests/syft/stores/kv_document_store_test.py b/packages/syft/tests/syft/stores/kv_document_store_test.py index 3caacf3eb4c..7326beaf479 100644 --- a/packages/syft/tests/syft/stores/kv_document_store_test.py +++ b/packages/syft/tests/syft/stores/kv_document_store_test.py @@ -22,7 +22,7 @@ def kv_store_partition(worker): store_config = MockStoreConfig() settings = PartitionSettings(name="test", object_type=MockObjectType) store = KeyValueStorePartition( - node_uid=worker.id, + server_uid=worker.id, root_verify_key=worker.root_client.credentials.verify_key, settings=settings, store_config=store_config, diff --git a/packages/syft/tests/syft/stores/mongo_document_store_test.py b/packages/syft/tests/syft/stores/mongo_document_store_test.py index d9d5ef578a3..ed796d9a8df 100644 --- a/packages/syft/tests/syft/stores/mongo_document_store_test.py +++ b/packages/syft/tests/syft/stores/mongo_document_store_test.py @@ -7,7 +7,7 @@ from result import Err # syft absolute -from syft.node.credentials import SyftVerifyKey +from syft.server.credentials import SyftVerifyKey from syft.service.action.action_permissions import ActionObjectPermission from syft.service.action.action_permissions import ActionPermission from syft.service.action.action_permissions import StoragePermission @@ -709,7 +709,7 @@ def test_mongo_store_partition_add_remove_storage_permission( storage_permission = StoragePermission( uid=obj.id, - node_uid=UID(), + server_uid=UID(), ) assert not mongo_store_partition.has_storage_permission(storage_permission) mongo_store_partition.add_storage_permission(storage_permission) @@ -720,14 +720,14 @@ def test_mongo_store_partition_add_remove_storage_permission( obj2 = MockSyftObject(data=1) mongo_store_partition.set(root_verify_key, obj2, add_storage_permission=False) storage_permission3 = StoragePermission( - uid=obj2.id, node_uid=mongo_store_partition.node_uid + uid=obj2.id, server_uid=mongo_store_partition.server_uid ) assert not mongo_store_partition.has_storage_permission(storage_permission3) obj3 = MockSyftObject(data=1) mongo_store_partition.set(root_verify_key, obj3, add_storage_permission=True) storage_permission4 = StoragePermission( - uid=obj3.id, node_uid=mongo_store_partition.node_uid + uid=obj3.id, server_uid=mongo_store_partition.server_uid ) assert mongo_store_partition.has_storage_permission(storage_permission4) diff --git a/packages/syft/tests/syft/stores/queue_stash_test.py b/packages/syft/tests/syft/stores/queue_stash_test.py index a89baaf0ea8..d4a65962373 100644 --- a/packages/syft/tests/syft/stores/queue_stash_test.py +++ b/packages/syft/tests/syft/stores/queue_stash_test.py @@ -28,12 +28,12 @@ def mock_queue_object(): ) linked_worker_pool = LinkedObject.from_obj( worker_pool_obj, - node_uid=UID(), + server_uid=UID(), service_type=SyftWorkerPoolService, ) obj = QueueItem( id=UID(), - node_uid=UID(), + server_uid=UID(), method="dummy_method", service="dummy_service", args=[], diff --git a/packages/syft/tests/syft/stores/store_constants_test.py b/packages/syft/tests/syft/stores/store_constants_test.py index 4de8f7df45a..e82fd839259 100644 --- a/packages/syft/tests/syft/stores/store_constants_test.py +++ b/packages/syft/tests/syft/stores/store_constants_test.py @@ -1,5 +1,5 @@ # syft absolute -from syft.node.credentials import SyftSigningKey 
+from syft.server.credentials import SyftSigningKey TEST_VERIFY_KEY_STRING_ROOT = ( "08e5bcddfd55cdff0f7f6a62d63a43585734c6e7a17b2ffb3f3efe322c3cecc5" diff --git a/packages/syft/tests/syft/stores/store_fixtures_test.py b/packages/syft/tests/syft/stores/store_fixtures_test.py index a1a9cc0a224..eb4aabd3cf4 100644 --- a/packages/syft/tests/syft/stores/store_fixtures_test.py +++ b/packages/syft/tests/syft/stores/store_fixtures_test.py @@ -10,7 +10,7 @@ import pytest # syft absolute -from syft.node.credentials import SyftVerifyKey +from syft.server.credentials import SyftVerifyKey from syft.service.action.action_permissions import ActionObjectPermission from syft.service.action.action_permissions import ActionPermission from syft.service.action.action_store import DictActionStore @@ -63,9 +63,11 @@ def str_to_locking_config(conf: str) -> LockingConfig: def document_store_with_admin( - node_uid: UID, verify_key: SyftVerifyKey + server_uid: UID, verify_key: SyftVerifyKey ) -> DocumentStore: - document_store = DictDocumentStore(node_uid=node_uid, root_verify_key=verify_key) + document_store = DictDocumentStore( + server_uid=server_uid, root_verify_key=verify_key + ) password = uuid.uuid4().hex @@ -214,11 +216,11 @@ def sqlite_action_store(sqlite_workspace: tuple[Path, str], request): ver_key = SyftVerifyKey.from_string(TEST_VERIFY_KEY_STRING_ROOT) - node_uid = UID() - document_store = document_store_with_admin(node_uid, ver_key) + server_uid = UID() + document_store = document_store_with_admin(server_uid, ver_key) yield SQLiteActionStore( - node_uid=node_uid, + server_uid=server_uid, store_config=store_config, root_verify_key=ver_key, document_store=document_store, @@ -325,10 +327,10 @@ def mongo_action_store(mongo_client, request): client_config=mongo_config, db_name=mongo_db_name, locking_config=locking_config ) ver_key = SyftVerifyKey.from_string(TEST_VERIFY_KEY_STRING_ROOT) - node_uid = UID() - document_store = document_store_with_admin(node_uid, ver_key) + server_uid = UID() + document_store = document_store_with_admin(server_uid, ver_key) mongo_action_store = MongoActionStore( - node_uid=node_uid, + server_uid=server_uid, store_config=store_config, root_verify_key=ver_key, document_store=document_store, @@ -365,11 +367,11 @@ def dict_action_store(request): store_config = DictStoreConfig(locking_config=locking_config) ver_key = SyftVerifyKey.from_string(TEST_VERIFY_KEY_STRING_ROOT) - node_uid = UID() - document_store = document_store_with_admin(node_uid, ver_key) + server_uid = UID() + document_store = document_store_with_admin(server_uid, ver_key) yield DictActionStore( - node_uid=node_uid, + server_uid=server_uid, store_config=store_config, root_verify_key=ver_key, document_store=document_store, diff --git a/packages/syft/tests/syft/stores/store_mocks_test.py b/packages/syft/tests/syft/stores/store_mocks_test.py index 9ab6a71cb57..8dc8f4e43bc 100644 --- a/packages/syft/tests/syft/stores/store_mocks_test.py +++ b/packages/syft/tests/syft/stores/store_mocks_test.py @@ -12,7 +12,10 @@ from syft.types.uid import UID -@serializable() +@serializable( + canonical_name="MockKeyValueBackingStore", + version=1, +) class MockKeyValueBackingStore(dict, KeyValueBackingStore): def __init__( self, diff --git a/packages/syft/tests/syft/transforms/transform_methods_test.py b/packages/syft/tests/syft/transforms/transform_methods_test.py index 6cd3e9a750e..93a521e297f 100644 --- a/packages/syft/tests/syft/transforms/transform_methods_test.py +++ b/packages/syft/tests/syft/transforms/transform_methods_test.py 
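The hunks that follow rename the node-scoped transform helpers to their server-scoped equivalents; as a preview, a hedged sketch of the renamed helper in isolation, assuming the import paths shown in this file's diff and a server_context object like the fixture these tests use.

from syft.types.transforms import TransformContext
from syft.types.transforms import add_server_uid_for_key

def stamp_server_uid(obj, server_context, key="server_uid"):
    # obj is assumed to be dict-convertible, like the dataclass mocks below;
    # the helper copies the server's id (formerly the node id) onto `key`
    transform_context = TransformContext.from_context(obj=obj, context=server_context)
    return add_server_uid_for_key(key=key)(context=transform_context)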
@@ -12,7 +12,7 @@ from syft.types.transforms import NotNone from syft.types.transforms import TransformContext from syft.types.transforms import add_credentials_for_key -from syft.types.transforms import add_node_uid_for_key +from syft.types.transforms import add_server_uid_for_key from syft.types.transforms import drop from syft.types.transforms import generate_id from syft.types.transforms import geteitherattr @@ -28,7 +28,7 @@ "syft_obj, context", [ ("admin_user", "authed_context"), - ("guest_user", "node_context"), + ("guest_user", "server_context"), ], ) def test_transformcontext(syft_obj, context, request): @@ -45,12 +45,12 @@ def test_transformcontext(syft_obj, context, request): if hasattr(context, "credentials"): assert transform_context.credentials == context.credentials - if hasattr(context, "node"): - assert transform_context.node == context.node + if hasattr(context, "server"): + assert transform_context.server == context.server - node_context = transform_context.to_node_context() + server_context = transform_context.to_server_context() - assert node_context == context + assert server_context == context @pytest.mark.parametrize( @@ -91,7 +91,7 @@ class MockObject: ("no_key", "no_value"), ], ) -def test_make_set_default(faker, key, value, node_context): +def test_make_set_default(faker, key, value, server_context): result = make_set_default(key, value) assert isinstance(result, FunctionType) assert isinstance(result, Callable) @@ -106,7 +106,7 @@ def __iter__(self): mock_obj = MockObject(obj_key=faker.name()) transform_context = TransformContext.from_context( - obj=mock_obj, context=node_context + obj=mock_obj, context=server_context ) resultant_context = result(transform_context) @@ -123,7 +123,7 @@ def __iter__(self): assert resultant_context.output[key] == mock_obj.obj_key -def test_drop(faker, node_context): +def test_drop(faker, server_context): @dataclass class MockObject: name: str @@ -146,7 +146,7 @@ def __iter__(self): assert isinstance(result, Callable) transform_context = TransformContext.from_context( - obj=mock_obj, context=node_context + obj=mock_obj, context=server_context ) expected_output = dict(mock_obj).copy() @@ -164,7 +164,7 @@ def __iter__(self): assert resultant_context.output == expected_output -def test_keep(faker, node_context): +def test_keep(faker, server_context): @dataclass class MockObject: name: str @@ -187,7 +187,7 @@ def __iter__(self): assert isinstance(result, Callable) transform_context = TransformContext.from_context( - obj=mock_obj, context=node_context + obj=mock_obj, context=server_context ) mock_obj_dict = dict(mock_obj) @@ -206,7 +206,7 @@ def __iter__(self): assert resultant_context.output == expected_output -def test_rename(faker, node_context): +def test_rename(faker, server_context): @dataclass class MockObject: name: str @@ -230,7 +230,7 @@ def __iter__(self): assert isinstance(result, Callable) transform_context = TransformContext.from_context( - obj=mock_obj, context=node_context + obj=mock_obj, context=server_context ) mock_obj_dict = dict(mock_obj) @@ -245,7 +245,7 @@ def __iter__(self): assert resultant_context.output == expected_output -def test_generate_id(faker, node_context): +def test_generate_id(faker, server_context): @dataclass class MockObject: name: str @@ -272,7 +272,7 @@ def __iter__(self): ) transform_context = TransformContext.from_context( - obj=mock_obj, context=node_context + obj=mock_obj, context=server_context ) result = generate_id(context=transform_context) @@ -288,7 +288,7 @@ def __iter__(self): ) 
transform_context = TransformContext.from_context( - obj=mock_obj, context=node_context + obj=mock_obj, context=server_context ) result = generate_id(context=transform_context) @@ -305,7 +305,7 @@ def __iter__(self): ) transform_context = TransformContext.from_context( - obj=mock_obj, context=node_context + obj=mock_obj, context=server_context ) result = generate_id(context=transform_context) @@ -340,7 +340,7 @@ def __iter__(self): assert result.output[key] == authed_context.credentials -def test_add_node_uid_for_key(faker, node_context): +def test_add_server_uid_for_key(faker, server_context): @dataclass class MockObject: name: str @@ -353,20 +353,20 @@ def __iter__(self): ) transform_context = TransformContext.from_context( - obj=mock_obj, context=node_context + obj=mock_obj, context=server_context ) key = "random_uid_key" - result_func = add_node_uid_for_key(key=key) + result_func = add_server_uid_for_key(key=key) assert isinstance(result_func, FunctionType) result = result_func(context=transform_context) assert isinstance(result, TransformContext) assert key in result.output - assert result.output[key] == node_context.node.id + assert result.output[key] == server_context.server.id -def test_validate_url(faker, node_context): +def test_validate_url(faker, server_context): @dataclass class MockObject: url: str | None @@ -377,7 +377,7 @@ def __iter__(self): mock_obj = MockObject(url=None) transform_context = TransformContext.from_context( - obj=mock_obj, context=node_context + obj=mock_obj, context=server_context ) # no change in context if url is None @@ -390,7 +390,7 @@ def __iter__(self): mock_obj = MockObject(url=url_with_port) transform_context = TransformContext.from_context( - obj=mock_obj, context=node_context + obj=mock_obj, context=server_context ) result = validate_url(transform_context) @@ -398,7 +398,7 @@ def __iter__(self): assert result.output["url"] == url -def test_validate_email(faker, node_context): +def test_validate_email(faker, server_context): @dataclass class MockObject: email: str @@ -408,7 +408,7 @@ def __iter__(self): mock_obj = MockObject(email=None) transform_context = TransformContext.from_context( - obj=mock_obj, context=node_context + obj=mock_obj, context=server_context ) result = validate_email(transform_context) assert isinstance(result, TransformContext) @@ -416,7 +416,7 @@ def __iter__(self): mock_obj = MockObject(email=faker.email()) transform_context = TransformContext.from_context( - obj=mock_obj, context=node_context + obj=mock_obj, context=server_context ) result = validate_email(transform_context) assert isinstance(result, TransformContext) @@ -425,7 +425,7 @@ def __iter__(self): mock_obj = MockObject(email=faker.name()) transform_context = TransformContext.from_context( - obj=mock_obj, context=node_context + obj=mock_obj, context=server_context ) with pytest.raises(PydanticCustomError): diff --git a/packages/syft/tests/syft/transforms/transforms_test.py b/packages/syft/tests/syft/transforms/transforms_test.py index 465e876b464..140854d6c1d 100644 --- a/packages/syft/tests/syft/transforms/transforms_test.py +++ b/packages/syft/tests/syft/transforms/transforms_test.py @@ -75,7 +75,7 @@ def test_validate_klass_and_version( assert result == expected_result -def test_generate_transform_wrapper(faker, monkeypatch, node_context): +def test_generate_transform_wrapper(faker, monkeypatch, server_context): mock_value = faker.random_int() def mock_transform_method(context: TransformContext) -> TransformContext: @@ -95,7 +95,7 @@ def 
mock_transform_method(context: TransformContext) -> TransformContext: output = resultant_wrapper( MockObjectFromSyftBaseObj(), - node_context, + server_context, ) assert isinstance(output, MockObjectToSyftBaseObj) assert output.value == mock_value diff --git a/packages/syft/tests/syft/users/fixtures.py b/packages/syft/tests/syft/users/fixtures.py index 14c671d348e..8f282b6dbdc 100644 --- a/packages/syft/tests/syft/users/fixtures.py +++ b/packages/syft/tests/syft/users/fixtures.py @@ -2,10 +2,10 @@ import pytest # syft absolute -from syft.node.credentials import UserLoginCredentials -from syft.node.worker import Worker +from syft.server.credentials import UserLoginCredentials +from syft.server.worker import Worker from syft.service.context import AuthedServiceContext -from syft.service.context import NodeServiceContext +from syft.service.context import ServerServiceContext from syft.service.context import UnauthedServiceContext from syft.service.user.user import User from syft.service.user.user import UserCreate @@ -118,12 +118,12 @@ def user_service(document_store: DocumentStore): @pytest.fixture def authed_context(admin_user: User, worker: Worker) -> AuthedServiceContext: - yield AuthedServiceContext(credentials=admin_user.verify_key, node=worker) + yield AuthedServiceContext(credentials=admin_user.verify_key, server=worker) @pytest.fixture -def node_context(worker: Worker) -> NodeServiceContext: - yield NodeServiceContext(node=worker) +def server_context(worker: Worker) -> ServerServiceContext: + yield ServerServiceContext(server=worker) @pytest.fixture @@ -133,4 +133,4 @@ def unauthed_context( login_credentials = UserLoginCredentials( email=guest_create_user.email, password=guest_create_user.password ) - yield UnauthedServiceContext(login_credentials=login_credentials, node=worker) + yield UnauthedServiceContext(login_credentials=login_credentials, server=worker) diff --git a/packages/syft/tests/syft/users/local_execution_test.py b/packages/syft/tests/syft/users/local_execution_test.py index bdb706ae945..966bc215a4a 100644 --- a/packages/syft/tests/syft/users/local_execution_test.py +++ b/packages/syft/tests/syft/users/local_execution_test.py @@ -13,7 +13,7 @@ @pytest.mark.skipif(sys.platform == "win32", reason="does not run on windows") def test_local_execution(worker): - root_domain_client = worker.root_client + root_datasite_client = worker.root_client dataset = sy.Dataset( name="local_test", asset_list=[ @@ -24,15 +24,15 @@ def test_local_execution(worker): ) ], ) - root_domain_client.upload_dataset(dataset) - asset = root_domain_client.datasets[0].assets[0] + root_datasite_client.upload_dataset(dataset) + asset = root_datasite_client.datasets[0].assets[0] APIRegistry.__api_registry__ = OrderedDict() APIRegistry.set_api_for( - node_uid=worker.id, - user_verify_key=root_domain_client.verify_key, - api=root_domain_client.api, + server_uid=worker.id, + user_verify_key=root_datasite_client.verify_key, + api=root_datasite_client.api, ) @sy.syft_function( diff --git a/packages/syft/tests/syft/users/user_code_test.py b/packages/syft/tests/syft/users/user_code_test.py index 2b1e206c7a4..32b70f42d37 100644 --- a/packages/syft/tests/syft/users/user_code_test.py +++ b/packages/syft/tests/syft/users/user_code_test.py @@ -9,8 +9,8 @@ # syft absolute import syft as sy -from syft.client.domain_client import DomainClient -from syft.node.worker import Worker +from syft.client.datasite_client import DatasiteClient +from syft.server.worker import Worker from syft.service.action.action_data_empty 
import ActionDataEmpty from syft.service.action.action_object import ActionObject from syft.service.request.request import Request @@ -18,7 +18,6 @@ from syft.service.response import SyftError from syft.service.response import SyftSuccess from syft.service.user.user import User -from syft.service.user.user import UserUpdate from syft.service.user.user_roles import ServiceRole # relative @@ -41,7 +40,7 @@ def mock_syft_func_2(): return 1 -def test_repr_markdown_not_throwing_error(guest_client: DomainClient) -> None: +def test_repr_markdown_not_throwing_error(guest_client: DatasiteClient) -> None: guest_client.code.submit(mock_syft_func) result = guest_client.code.get_by_service_func_name("mock_syft_func") assert len(result) == 1 @@ -51,7 +50,7 @@ def test_repr_markdown_not_throwing_error(guest_client: DomainClient) -> None: @pytest.mark.parametrize("delete_original_admin", [False, True]) def test_new_admin_can_list_user_code( worker: Worker, - ds_client: DomainClient, + ds_client: DatasiteClient, faker: Faker, delete_original_admin: bool, ) -> None: @@ -68,12 +67,10 @@ def test_new_admin_can_list_user_code( admin = root_client.login(email=email, password=pw) - root_client.api.services.user.update( - admin.me.id, UserUpdate(role=ServiceRole.ADMIN) - ) + root_client.api.services.user.update(uid=admin.account.id, role=ServiceRole.ADMIN) if delete_original_admin: - res = root_client.api.services.user.delete(root_client.me.id) + res = root_client.api.services.user.delete(root_client.account.id) assert not isinstance(res, SyftError) user_code_stash = worker.get_service("usercodeservice").stash @@ -84,25 +81,25 @@ def test_new_admin_can_list_user_code( def test_user_code(worker) -> None: - root_domain_client = worker.root_client - root_domain_client.register( + root_datasite_client = worker.root_client + root_datasite_client.register( name="data-scientist", email="test_user@openmined.org", password="0000", password_verify="0000", ) - guest_client = root_domain_client.login( + guest_client = root_datasite_client.login( email="test_user@openmined.org", password="0000", ) - users = root_domain_client.users.get_all() + users = root_datasite_client.users.get_all() users[-1].allow_mock_execution() guest_client.api.services.code.request_code_execution(mock_syft_func) - root_domain_client = worker.root_client - message = root_domain_client.notifications[-1] + root_datasite_client = worker.root_client + message = root_datasite_client.notifications[-1] request = message.link user_code = request.changes[0].code result = user_code.run() @@ -147,7 +144,7 @@ def test_duplicated_user_code(worker) -> None: # request the a different function name but same content will also succeed # flaky if not blocking - mock_syft_func_2(syft_no_node=True) + mock_syft_func_2(syft_no_server=True) result = ds_client.api.services.code.request_code_execution(mock_syft_func_2) assert isinstance(result, Request) assert len(ds_client.code.get_all()) == 2 @@ -223,8 +220,8 @@ def mock_inner_func(): @sy.syft_function( input_policy=sy.ExactMatch(), output_policy=sy.SingleExecutionExactOutput() ) -def mock_outer_func(domain): - job = domain.launch_job(mock_inner_func) +def mock_outer_func(datasite): + job = datasite.launch_job(mock_inner_func) return job @@ -232,19 +229,21 @@ def test_nested_requests(worker, guest_client: User): guest_client.api.services.code.submit(mock_inner_func) guest_client.api.services.code.request_code_execution(mock_outer_func) - root_domain_client = worker.root_client - request = root_domain_client.requests[-1] + 
root_datasite_client = worker.root_client + request = root_datasite_client.requests[-1] - root_domain_client.api.services.request.apply(request.id) - request = root_domain_client.requests[-1] + root_datasite_client.api.services.request.apply(request.id) + request = root_datasite_client.requests[-1] - codes = root_domain_client.code + codes = root_datasite_client.code inner = codes[0] if codes[0].service_func_name == "mock_inner_func" else codes[1] outer = codes[0] if codes[0].service_func_name == "mock_outer_func" else codes[1] assert list(request.code.nested_codes.keys()) == ["mock_inner_func"] - (linked_obj, node) = request.code.nested_codes["mock_inner_func"] - assert node == {} - resolved = root_domain_client.api.services.notifications.resolve_object(linked_obj) + (linked_obj, server) = request.code.nested_codes["mock_inner_func"] + assert server == {} + resolved = root_datasite_client.api.services.notifications.resolve_object( + linked_obj + ) assert resolved.id == inner.id assert outer.status.approved assert not inner.status.approved @@ -252,16 +251,16 @@ def test_nested_requests(worker, guest_client: User): def test_user_code_mock_execution(worker) -> None: # Setup - root_domain_client = worker.root_client + root_datasite_client = worker.root_client - # TODO guest_client fixture is not in root_domain_client.users - root_domain_client.register( + # TODO guest_client fixture is not in root_datasite_client.users + root_datasite_client.register( name="data-scientist", email="test_user@openmined.org", password="0000", password_verify="0000", ) - ds_client = root_domain_client.login( + ds_client = root_datasite_client.login( email="test_user@openmined.org", password="0000", ) @@ -276,7 +275,7 @@ def test_user_code_mock_execution(worker) -> None: ) ], ) - root_domain_client.upload_dataset(dataset) + root_datasite_client.upload_dataset(dataset) # DS requests code execution data = ds_client.datasets[0].assets[0] @@ -297,7 +296,7 @@ def compute_mean(data): assert isinstance(result, SyftError) # DO grants permissions - users = root_domain_client.users.get_all() + users = root_datasite_client.users.get_all() guest_user = [u for u in users if u.id == guest_user.id][0] guest_user.allow_mock_execution() @@ -308,15 +307,15 @@ def compute_mean(data): def test_mock_multiple_arguments(worker) -> None: # Setup - root_domain_client = worker.root_client + root_datasite_client = worker.root_client - root_domain_client.register( + root_datasite_client.register( name="data-scientist", email="test_user@openmined.org", password="0000", password_verify="0000", ) - ds_client = root_domain_client.login( + ds_client = root_datasite_client.login( email="test_user@openmined.org", password="0000", ) @@ -331,8 +330,8 @@ def test_mock_multiple_arguments(worker) -> None: ) ], ) - root_domain_client.upload_dataset(dataset) - users = root_domain_client.users.get_all() + root_datasite_client.upload_dataset(dataset) + users = root_datasite_client.users.get_all() users[-1].allow_mock_execution() # DS requests code execution @@ -343,7 +342,7 @@ def compute_sum(data1, data2): return data1 + data2 ds_client.api.services.code.request_code_execution(compute_sum) - root_domain_client.requests[-1].approve() + root_datasite_client.requests[-1].approve() # Mock execution succeeds, result not cached result = ds_client.api.services.code.compute_sum(data1=1, data2=1) @@ -363,20 +362,20 @@ def compute_sum(data1, data2): def test_mock_no_arguments(worker) -> None: - root_domain_client = worker.root_client + root_datasite_client = 
worker.root_client - root_domain_client.register( + root_datasite_client.register( name="data-scientist", email="test_user@openmined.org", password="0000", password_verify="0000", ) - ds_client = root_domain_client.login( + ds_client = root_datasite_client.login( email="test_user@openmined.org", password="0000", ) - users = root_domain_client.users.get_all() + users = root_datasite_client.users.get_all() @sy.syft_function_single_use() def compute_sum(): @@ -396,7 +395,7 @@ def compute_sum(): # approved, no mock execution users[-1].allow_mock_execution(allow=False) - message = root_domain_client.notifications[-1] + message = root_datasite_client.notifications[-1] request = message.link user_code = request.changes[0].code result = user_code.run() @@ -440,7 +439,7 @@ def valid_name_2(): client.code.submit(valid_name_2) -def test_submit_code_with_global_var(guest_client: DomainClient) -> None: +def test_submit_code_with_global_var(guest_client: DatasiteClient) -> None: @sy.syft_function( input_policy=sy.ExactMatch(), output_policy=sy.SingleExecutionExactOutput() ) @@ -461,15 +460,15 @@ def mock_syft_func_single_use_with_global(): def test_request_existing_usercodesubmit(worker) -> None: - root_domain_client = worker.root_client + root_datasite_client = worker.root_client - root_domain_client.register( + root_datasite_client.register( name="data-scientist", email="test_user@openmined.org", password="0000", password_verify="0000", ) - ds_client = root_domain_client.login( + ds_client = root_datasite_client.login( email="test_user@openmined.org", password="0000", ) @@ -492,15 +491,15 @@ def my_func(): def test_request_existing_usercode(worker) -> None: - root_domain_client = worker.root_client + root_datasite_client = worker.root_client - root_domain_client.register( + root_datasite_client.register( name="data-scientist", email="test_user@openmined.org", password="0000", password_verify="0000", ) - ds_client = root_domain_client.login( + ds_client = root_datasite_client.login( email="test_user@openmined.org", password="0000", ) @@ -525,26 +524,26 @@ def my_func(): def test_submit_existing_code_different_user(worker): - root_domain_client = worker.root_client + root_datasite_client = worker.root_client - root_domain_client.register( + root_datasite_client.register( name="data-scientist", email="test_user@openmined.org", password="0000", password_verify="0000", ) - ds_client_1 = root_domain_client.login( + ds_client_1 = root_datasite_client.login( email="test_user@openmined.org", password="0000", ) - root_domain_client.register( + root_datasite_client.register( name="data-scientist-2", email="test_user_2@openmined.org", password="0000", password_verify="0000", ) - ds_client_2 = root_domain_client.login( + ds_client_2 = root_datasite_client.login( email="test_user_2@openmined.org", password="0000", ) @@ -566,4 +565,4 @@ def my_func(): assert len(ds_client_1.code.get_all()) == 1 assert len(ds_client_2.code.get_all()) == 1 - assert len(root_domain_client.code.get_all()) == 2 + assert len(root_datasite_client.code.get_all()) == 2 diff --git a/packages/syft/tests/syft/users/user_service_test.py b/packages/syft/tests/syft/users/user_service_test.py index cd6d0aeefd4..dfb424663e1 100644 --- a/packages/syft/tests/syft/users/user_service_test.py +++ b/packages/syft/tests/syft/users/user_service_test.py @@ -8,10 +8,10 @@ from result import Ok # syft absolute -from syft.node.credentials import SyftVerifyKey -from syft.node.worker import Worker +from syft.server.credentials import SyftVerifyKey +from 
syft.server.worker import Worker from syft.service.context import AuthedServiceContext -from syft.service.context import NodeServiceContext +from syft.service.context import ServerServiceContext from syft.service.context import UnauthedServiceContext from syft.service.response import SyftError from syft.service.response import SyftSuccess @@ -42,7 +42,7 @@ def mock_get_by_email(credentials: SyftVerifyKey, email: str) -> Ok: return Ok(guest_create_user.to(User)) monkeypatch.setattr(user_service.stash, "get_by_email", mock_get_by_email) - response = user_service.create(authed_context, guest_create_user) + response = user_service.create(authed_context, **guest_create_user) assert isinstance(response, SyftError) expected_error_message = ( f"User already exists with email: {guest_create_user.email}" @@ -60,7 +60,7 @@ def mock_get_by_email(credentials: SyftVerifyKey, email: str) -> Err: return Err(f"No user exists with given email: {email}") monkeypatch.setattr(user_service.stash, "get_by_email", mock_get_by_email) - response = user_service.create(authed_context, guest_create_user) + response = user_service.create(authed_context, **guest_create_user) assert isinstance(response, SyftError) expected_error_message = mock_get_by_email(None, guest_create_user.email).err() assert response.message == expected_error_message @@ -78,7 +78,7 @@ def mock_get_by_email(credentials: SyftVerifyKey, email: str) -> Ok: expected_user = guest_create_user.to(User) expected_output: UserView = expected_user.to(UserView) expected_output.syft_client_verify_key = authed_context.credentials - expected_output.syft_node_location = authed_context.node.id + expected_output.syft_server_location = authed_context.server.id def mock_set( credentials: SyftVerifyKey, @@ -90,7 +90,7 @@ def mock_set( monkeypatch.setattr(user_service.stash, "get_by_email", mock_get_by_email) monkeypatch.setattr(user_service.stash, "set", mock_set) - response = user_service.create(authed_context, guest_create_user) + response = user_service.create(authed_context, **guest_create_user) assert isinstance(response, UserView) assert response.to_dict() == expected_output.to_dict() @@ -116,7 +116,7 @@ def mock_set( monkeypatch.setattr(user_service.stash, "get_by_email", mock_get_by_email) monkeypatch.setattr(user_service.stash, "set", mock_set) - response = user_service.create(authed_context, guest_create_user) + response = user_service.create(authed_context, **guest_create_user) assert isinstance(response, SyftError) assert response.message == expected_error_msg @@ -301,9 +301,7 @@ def mock_get_by_uid(credentials: SyftVerifyKey, uid: UID) -> Err: monkeypatch.setattr(user_service.stash, "get_by_uid", mock_get_by_uid) - response = user_service.update( - authed_context, uid=random_uid, user_update=update_user - ) + response = user_service.update(authed_context, uid=random_uid, **update_user) assert isinstance(response, SyftError) assert response.message == expected_error_msg @@ -322,9 +320,7 @@ def mock_get_by_uid(credentials: SyftVerifyKey, uid: UID) -> Ok: monkeypatch.setattr(user_service.stash, "get_by_uid", mock_get_by_uid) - response = user_service.update( - authed_context, uid=random_uid, user_update=update_user - ) + response = user_service.update(authed_context, uid=random_uid, **update_user) assert isinstance(response, SyftError) assert response.message == expected_error_msg @@ -349,7 +345,7 @@ def mock_update(credentials: SyftVerifyKey, user: User, has_permission: bool) -> authed_context.role = ServiceRole.ADMIN resultant_user = user_service.update( - 
authed_context, uid=guest_user.id, user_update=update_user + authed_context, uid=guest_user.id, **update_user ) assert isinstance(resultant_user, UserView) assert resultant_user.email == update_user.email @@ -379,9 +375,7 @@ def mock_update(credentials: SyftVerifyKey, user, has_permission: bool) -> Err: monkeypatch.setattr(user_service.stash, "update", mock_update) monkeypatch.setattr(user_service.stash, "get_by_uid", mock_get_by_uid) - response = user_service.update( - authed_context, uid=guest_user.id, user_update=update_user - ) + response = user_service.update(authed_context, uid=guest_user.id, **update_user) assert isinstance(response, SyftError) assert response.message == expected_error_msg @@ -499,10 +493,10 @@ def mock_get_by_email(credentials: SyftVerifyKey, email): new_callable=mock.PropertyMock, return_value=settings_with_signup_enabled(worker), ): - mock_worker = Worker.named(name="mock-node") - node_context = NodeServiceContext(node=mock_worker) + mock_worker = Worker.named(name="mock-server") + server_context = ServerServiceContext(server=mock_worker) - response = user_service.register(node_context, guest_create_user) + response = user_service.register(server_context, guest_create_user) assert isinstance(response, SyftError) assert response.message == expected_error_msg @@ -526,10 +520,10 @@ def mock_get_by_email(credentials: SyftVerifyKey, email): new_callable=mock.PropertyMock, return_value=settings_with_signup_enabled(worker), ): - mock_worker = Worker.named(name="mock-node") - node_context = NodeServiceContext(node=mock_worker) + mock_worker = Worker.named(name="mock-server") + server_context = ServerServiceContext(server=mock_worker) - response = user_service.register(node_context, guest_create_user) + response = user_service.register(server_context, guest_create_user) assert isinstance(response, SyftError) assert response.message == expected_error_msg @@ -554,8 +548,8 @@ def mock_set(*args, **kwargs) -> Ok: new_callable=mock.PropertyMock, return_value=settings_with_signup_enabled(worker), ): - mock_worker = Worker.named(name="mock-node") - node_context = NodeServiceContext(node=mock_worker) + mock_worker = Worker.named(name="mock-server") + server_context = ServerServiceContext(server=mock_worker) monkeypatch.setattr(user_service.stash, "get_by_email", mock_get_by_email) monkeypatch.setattr(user_service.stash, "set", mock_set) @@ -563,7 +557,7 @@ def mock_set(*args, **kwargs) -> Ok: expected_msg = f"User '{guest_create_user.name}' successfully registered!" 
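As an aside, the register tests in this file all build their context the same way; a hedged sketch of that wiring, with the worker name purely illustrative.

from syft.server.worker import Worker
from syft.service.context import ServerServiceContext

def make_server_context(name: str = "mock-server") -> ServerServiceContext:
    # an in-memory worker plus the renamed ServerServiceContext (formerly NodeServiceContext)
    worker = Worker.named(name=name)
    return ServerServiceContext(server=worker)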
expected_private_key = guest_user.to(UserPrivateKey) - response = user_service.register(node_context, guest_create_user) + response = user_service.register(server_context, guest_create_user) assert isinstance(response, tuple) syft_success_response, user_private_key = response @@ -598,13 +592,13 @@ def mock_set( new_callable=mock.PropertyMock, return_value=settings_with_signup_enabled(worker), ): - mock_worker = Worker.named(name="mock-node") - node_context = NodeServiceContext(node=mock_worker) + mock_worker = Worker.named(name="mock-server") + server_context = ServerServiceContext(server=mock_worker) monkeypatch.setattr(user_service.stash, "get_by_email", mock_get_by_email) monkeypatch.setattr(user_service.stash, "set", mock_set) - response = user_service.register(node_context, guest_create_user) + response = user_service.register(server_context, guest_create_user) assert isinstance(response, SyftError) assert response.message == expected_error_msg diff --git a/packages/syft/tests/syft/users/user_stash_test.py b/packages/syft/tests/syft/users/user_stash_test.py index 2529b417b89..51d1f75f159 100644 --- a/packages/syft/tests/syft/users/user_stash_test.py +++ b/packages/syft/tests/syft/users/user_stash_test.py @@ -2,7 +2,7 @@ from faker import Faker # syft absolute -from syft.node.credentials import SyftSigningKey +from syft.server.credentials import SyftSigningKey from syft.service.response import SyftSuccess from syft.service.user.user import User from syft.service.user.user import UserUpdate @@ -11,9 +11,9 @@ from syft.types.uid import UID -def add_mock_user(root_domain_client, user_stash: UserStash, user: User) -> User: +def add_mock_user(root_datasite_client, user_stash: UserStash, user: User) -> User: # prepare: add mock data - result = user_stash.partition.set(root_domain_client.credentials.verify_key, user) + result = user_stash.partition.set(root_datasite_client.credentials.verify_key, user) assert result.is_ok() user = result.ok() @@ -23,9 +23,9 @@ def add_mock_user(root_domain_client, user_stash: UserStash, user: User) -> User def test_userstash_set( - root_domain_client, user_stash: UserStash, guest_user: User + root_datasite_client, user_stash: UserStash, guest_user: User ) -> None: - result = user_stash.set(root_domain_client.credentials.verify_key, guest_user) + result = user_stash.set(root_datasite_client.credentials.verify_key, guest_user) assert result.is_ok() created_user = result.ok() @@ -35,14 +35,14 @@ def test_userstash_set( def test_userstash_set_duplicate( - root_domain_client, user_stash: UserStash, guest_user: User + root_datasite_client, user_stash: UserStash, guest_user: User ) -> None: - result = user_stash.set(root_domain_client.credentials.verify_key, guest_user) + result = user_stash.set(root_datasite_client.credentials.verify_key, guest_user) assert result.is_ok() original_count = len(user_stash.partition.data) - result = user_stash.set(root_domain_client.credentials.verify_key, guest_user) + result = user_stash.set(root_datasite_client.credentials.verify_key, guest_user) assert result.is_err() assert "Duplication Key Error" in result.err() @@ -51,13 +51,13 @@ def test_userstash_set_duplicate( def test_userstash_get_by_uid( - root_domain_client, user_stash: UserStash, guest_user: User + root_datasite_client, user_stash: UserStash, guest_user: User ) -> None: # prepare: add mock data - user = add_mock_user(root_domain_client, user_stash, guest_user) + user = add_mock_user(root_datasite_client, user_stash, guest_user) result = user_stash.get_by_uid( - 
root_domain_client.credentials.verify_key, uid=user.id + root_datasite_client.credentials.verify_key, uid=user.id ) assert result.is_ok() @@ -67,7 +67,7 @@ def test_userstash_get_by_uid( random_uid = UID() result = user_stash.get_by_uid( - root_domain_client.credentials.verify_key, uid=random_uid + root_datasite_client.credentials.verify_key, uid=random_uid ) assert result.is_ok() @@ -76,13 +76,13 @@ def test_userstash_get_by_uid( def test_userstash_get_by_email( - root_domain_client, faker: Faker, user_stash: UserStash, guest_user: User + root_datasite_client, faker: Faker, user_stash: UserStash, guest_user: User ) -> None: # prepare: add mock data - user = add_mock_user(root_domain_client, user_stash, guest_user) + user = add_mock_user(root_datasite_client, user_stash, guest_user) result = user_stash.get_by_email( - root_domain_client.credentials.verify_key, email=user.email + root_datasite_client.credentials.verify_key, email=user.email ) assert result.is_ok() searched_user = result.ok() @@ -90,7 +90,7 @@ def test_userstash_get_by_email( random_email = faker.email() result = user_stash.get_by_email( - root_domain_client.credentials.verify_key, email=random_email + root_datasite_client.credentials.verify_key, email=random_email ) searched_user = result.ok() assert result.is_ok() @@ -98,13 +98,13 @@ def test_userstash_get_by_email( def test_userstash_get_by_signing_key( - root_domain_client, user_stash: UserStash, guest_user: User + root_datasite_client, user_stash: UserStash, guest_user: User ) -> None: # prepare: add mock data - user = add_mock_user(root_domain_client, user_stash, guest_user) + user = add_mock_user(root_datasite_client, user_stash, guest_user) result = user_stash.get_by_signing_key( - root_domain_client.credentials.verify_key, signing_key=user.signing_key + root_datasite_client.credentials.verify_key, signing_key=user.signing_key ) assert result.is_ok() searched_user = result.ok() @@ -112,7 +112,7 @@ def test_userstash_get_by_signing_key( signing_key_as_str = str(user.signing_key) result = user_stash.get_by_signing_key( - root_domain_client.credentials.verify_key, signing_key=signing_key_as_str + root_datasite_client.credentials.verify_key, signing_key=signing_key_as_str ) assert result.is_ok() searched_user = result.ok() @@ -120,7 +120,7 @@ def test_userstash_get_by_signing_key( random_singing_key = SyftSigningKey.generate() result = user_stash.get_by_signing_key( - root_domain_client.credentials.verify_key, signing_key=random_singing_key + root_datasite_client.credentials.verify_key, signing_key=random_singing_key ) searched_user = result.ok() assert result.is_ok() @@ -128,13 +128,13 @@ def test_userstash_get_by_signing_key( def test_userstash_get_by_verify_key( - root_domain_client, user_stash: UserStash, guest_user: User + root_datasite_client, user_stash: UserStash, guest_user: User ) -> None: # prepare: add mock data - user = add_mock_user(root_domain_client, user_stash, guest_user) + user = add_mock_user(root_datasite_client, user_stash, guest_user) result = user_stash.get_by_verify_key( - root_domain_client.credentials.verify_key, verify_key=user.verify_key + root_datasite_client.credentials.verify_key, verify_key=user.verify_key ) assert result.is_ok() searched_user = result.ok() @@ -142,7 +142,7 @@ def test_userstash_get_by_verify_key( verify_key_as_str = str(user.verify_key) result = user_stash.get_by_verify_key( - root_domain_client.credentials.verify_key, verify_key=verify_key_as_str + root_datasite_client.credentials.verify_key, 
verify_key=verify_key_as_str ) assert result.is_ok() searched_user = result.ok() @@ -150,7 +150,7 @@ def test_userstash_get_by_verify_key( random_verify_key = SyftSigningKey.generate().verify_key result = user_stash.get_by_verify_key( - root_domain_client.credentials.verify_key, verify_key=random_verify_key + root_datasite_client.credentials.verify_key, verify_key=random_verify_key ) searched_user = result.ok() assert result.is_ok() @@ -158,13 +158,13 @@ def test_userstash_get_by_verify_key( def test_userstash_get_by_role( - root_domain_client, user_stash: UserStash, guest_user: User + root_datasite_client, user_stash: UserStash, guest_user: User ) -> None: # prepare: add mock data - user = add_mock_user(root_domain_client, user_stash, guest_user) + user = add_mock_user(root_datasite_client, user_stash, guest_user) result = user_stash.get_by_role( - root_domain_client.credentials.verify_key, role=ServiceRole.GUEST + root_datasite_client.credentials.verify_key, role=ServiceRole.GUEST ) assert result.is_ok() searched_user = result.ok() @@ -172,13 +172,13 @@ def test_userstash_get_by_role( def test_userstash_delete_by_uid( - root_domain_client, user_stash: UserStash, guest_user: User + root_datasite_client, user_stash: UserStash, guest_user: User ) -> None: # prepare: add mock data - user = add_mock_user(root_domain_client, user_stash, guest_user) + user = add_mock_user(root_datasite_client, user_stash, guest_user) result = user_stash.delete_by_uid( - root_domain_client.credentials.verify_key, uid=user.id + root_datasite_client.credentials.verify_key, uid=user.id ) assert result.is_ok() response = result.ok() @@ -186,7 +186,7 @@ def test_userstash_delete_by_uid( assert str(user.id) in response.message result = user_stash.get_by_uid( - root_domain_client.credentials.verify_key, uid=user.id + root_datasite_client.credentials.verify_key, uid=user.id ) assert result.is_ok() searched_user = result.ok() @@ -194,17 +194,20 @@ def test_userstash_delete_by_uid( def test_userstash_update( - root_domain_client, user_stash: UserStash, guest_user: User, update_user: UserUpdate + root_datasite_client, + user_stash: UserStash, + guest_user: User, + update_user: UserUpdate, ) -> None: # prepare: add mock data - user = add_mock_user(root_domain_client, user_stash, guest_user) + user = add_mock_user(root_datasite_client, user_stash, guest_user) update_kwargs = update_user.to_dict(exclude_empty=True).items() for field_name, value in update_kwargs: setattr(user, field_name, value) - result = user_stash.update(root_domain_client.credentials.verify_key, user=user) + result = user_stash.update(root_datasite_client.credentials.verify_key, user=user) assert result.is_ok() updated_user = result.ok() assert isinstance(updated_user, User) diff --git a/packages/syft/tests/syft/users/user_test.py b/packages/syft/tests/syft/users/user_test.py index 613c3561bcc..00d1743fd71 100644 --- a/packages/syft/tests/syft/users/user_test.py +++ b/packages/syft/tests/syft/users/user_test.py @@ -10,13 +10,12 @@ from syft import SyftError from syft import SyftSuccess from syft.client.api import SyftAPICall -from syft.client.domain_client import DomainClient -from syft.node.node import get_default_root_email -from syft.node.worker import Worker +from syft.client.datasite_client import DatasiteClient +from syft.server.server import get_default_root_email +from syft.server.worker import Worker from syft.service.context import AuthedServiceContext from syft.service.user.user import ServiceRole from syft.service.user.user import UserCreate 
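The user-stash hunks above all follow the same prepare/act/assert shape: call a stash method, check `result.is_ok()` or `result.is_err()`, then unwrap with `result.ok()` / `result.err()`. A minimal, library-free sketch of that wrapper style, assuming nothing beyond the standard library (the real `UserStash` methods return Syft's own result type, so this stand-in is purely illustrative):

```python
# Illustrative only: a tiny Ok/Err-style result wrapper mirroring the
# is_ok()/ok()/is_err()/err() calls the stash tests above assert against.
from dataclasses import dataclass
from typing import Any


@dataclass
class Result:
    value: Any = None
    error: str | None = None

    def is_ok(self) -> bool:
        return self.error is None

    def is_err(self) -> bool:
        return not self.is_ok()

    def ok(self) -> Any:
        return self.value if self.is_ok() else None

    def err(self) -> str | None:
        return self.error


def set_once(store: dict[str, Any], key: str, value: Any) -> Result:
    # Mirrors test_userstash_set_duplicate: inserting the same key twice
    # yields an error instead of silently overwriting the first entry.
    if key in store:
        return Result(error=f"Duplication Key Error for {key}")
    store[key] = value
    return Result(value=value)


store: dict[str, Any] = {}
assert set_once(store, "user-1", {"email": "a@example.com"}).is_ok()
duplicate = set_once(store, "user-1", {"email": "a@example.com"})
assert duplicate.is_err() and "Duplication Key Error" in duplicate.err()
```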
-from syft.service.user.user import UserUpdate from syft.service.user.user import UserView GUEST_ROLES = [ServiceRole.GUEST] @@ -32,12 +31,12 @@ def get_users(worker): return worker.get_service("UserService").get_all( - AuthedServiceContext(node=worker, credentials=worker.signing_key.verify_key) + AuthedServiceContext(server=worker, credentials=worker.signing_key.verify_key) ) -def get_mock_client(root_client, role) -> DomainClient: - worker = root_client.api.connection.node +def get_mock_client(root_client, role) -> DatasiteClient: + worker = root_client.api.connection.server client = worker.guest_client mail = Faker().email() name = Faker().name() @@ -47,9 +46,7 @@ def get_mock_client(root_client, role) -> DomainClient: ) assert user user_id = [u for u in get_users(worker) if u.email == mail][0].id - assert worker.root_client.api.services.user.update( - user_id, UserUpdate(user_id=user_id, role=role) - ) + assert worker.root_client.api.services.user.update(uid=user_id, role=role) client = client.login(email=mail, password=password) client._fetch_api(client.credentials) # hacky, but useful for testing: patch user id and role on client @@ -63,7 +60,7 @@ def manually_call_service(worker, client, service, args=None, kwargs=None): # while we mostly want to validate the server side permissions. args = args if args is not None else [] kwargs = kwargs if kwargs is not None else {} - api_call = SyftAPICall(node_uid=worker.id, path=service, args=args, kwargs=kwargs) + api_call = SyftAPICall(server_uid=worker.id, path=service, args=args, kwargs=kwargs) signed_call = api_call.sign(client.api.signing_key) signed_result = client.api.connection.make_call(signed_call) result = signed_result.message.data @@ -71,17 +68,17 @@ def manually_call_service(worker, client, service, args=None, kwargs=None): @pytest.fixture -def guest_client(worker) -> DomainClient: +def guest_client(worker) -> DatasiteClient: return get_mock_client(worker.root_client, ServiceRole.GUEST) @pytest.fixture -def ds_client(worker) -> DomainClient: +def ds_client(worker) -> DatasiteClient: return get_mock_client(worker.root_client, ServiceRole.DATA_SCIENTIST) @pytest.fixture -def do_client(worker) -> DomainClient: +def do_client(worker) -> DatasiteClient: return get_mock_client(worker.root_client, ServiceRole.DATA_OWNER) @@ -113,15 +110,11 @@ def test_user_create(worker, do_client, guest_client, ds_client, root_client): for client in [ds_client, guest_client]: assert not manually_call_service(worker, client, "user.create") for client in [do_client, root_client]: + user_create = UserCreate( + email=Faker().email(), name="z", password="pw", password_verify="pw" + ) res = manually_call_service( - worker, - client, - "user.create", - args=[ - UserCreate( - email=Faker().email(), name="z", password="pw", password_verify="pw" - ) - ], + worker, client, "user.create", args=[], kwargs={**user_create} ) assert isinstance(res, UserView) @@ -169,14 +162,14 @@ def test_user_update_roles(do_client, guest_client, ds_client, root_client, work clients = [get_mock_client(root_client, role) for role in DO_ROLES] for _c in clients: assert worker.root_client.api.services.user.update( - _c.user_id, UserUpdate(role=ServiceRole.ADMIN) + uid=_c.user_id, role=ServiceRole.ADMIN ) # DOs can update the roles of lower roles clients = [get_mock_client(root_client, role) for role in DS_ROLES] for _c in clients: assert do_client.api.services.user.update( - _c.user_id, UserUpdate(role=ServiceRole.DATA_SCIENTIST) + uid=_c.user_id, role=ServiceRole.DATA_SCIENTIST ) 
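The hunks above also change the user service call style: instead of passing a `UserCreate`/`UserUpdate` object positionally, fields are now forwarded as keyword arguments (`kwargs={**user_create}`, `update(uid=..., role=...)`). A hedged end-to-end sketch of how that reads against a locally launched server, using only calls that appear elsewhere in this diff; the server name and the registered user's details below are placeholders, not part of the patch:

```python
# Sketch under the assumptions stated above; not taken from the diff itself.
import syft as sy
from syft.service.user.user import ServiceRole

server = sy.orchestra.launch(name="update-api-demo", reset=True)
client = server.login(email="info@openmined.org", password="changethis")

client.register(
    name="Example DS",
    email="ds@example.com",
    password="changethis",
    password_verify="changethis",
)
user_id = [u for u in client.users if u.email == "ds@example.com"][0].id

# Fields go straight into the call as keyword arguments; no UserUpdate object.
client.api.services.user.update(uid=user_id, role=ServiceRole.DATA_SCIENTIST)

server.python_server.cleanup()
server.land()
```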
clients = [get_mock_client(root_client, role) for role in ADMIN_ROLES] @@ -185,7 +178,7 @@ def test_user_update_roles(do_client, guest_client, ds_client, root_client, work for _c in clients: for target_role in [ServiceRole.DATA_OWNER, ServiceRole.ADMIN]: assert not do_client.api.services.user.update( - _c.user_id, UserUpdate(role=target_role) + uid=_c.user_id, role=target_role ) # DOs cannot downgrade higher roles to lower levels @@ -197,7 +190,7 @@ def test_user_update_roles(do_client, guest_client, ds_client, root_client, work for target_role in DO_ROLES: if target_role < _c.role: assert not do_client.api.services.user.update( - _c.user_id, UserUpdate(role=target_role) + uid=_c.user_id, role=target_role ) # DSs cannot update any roles @@ -205,7 +198,7 @@ def test_user_update_roles(do_client, guest_client, ds_client, root_client, work for _c in clients: for target_role in ADMIN_ROLES: assert not ds_client.api.services.user.update( - _c.user_id, UserUpdate(role=target_role) + uid=_c.user_id, role=target_role ) # Guests cannot update any roles @@ -213,7 +206,7 @@ def test_user_update_roles(do_client, guest_client, ds_client, root_client, work for _c in clients: for target_role in ADMIN_ROLES: assert not guest_client.api.services.user.update( - _c.user_id, UserUpdate(role=target_role) + uid=_c.user_id, role=target_role ) @@ -225,44 +218,43 @@ def test_user_update(root_client): for target_client in target_clients: if executing_client.role != ServiceRole.ADMIN: assert not executing_client.api.services.user.update( - target_client.user_id, UserUpdate(name="abc") + uid=target_client.user_id, name="abc" ) else: assert executing_client.api.services.user.update( - target_client.user_id, UserUpdate(name="abc") + uid=target_client.user_id, name="abc" ) # you can update yourself assert executing_client.api.services.user.update( - executing_client.user_id, UserUpdate(name=Faker().name()) + uid=executing_client.user_id, name=Faker().name() ) def test_guest_user_update_to_root_email_failed( - root_client: DomainClient, - do_client: DomainClient, - guest_client: DomainClient, - ds_client: DomainClient, + root_client: DatasiteClient, + do_client: DatasiteClient, + guest_client: DatasiteClient, + ds_client: DatasiteClient, ) -> None: default_root_email: str = get_default_root_email() - user_update_to_root_email = UserUpdate(email=default_root_email) for client in [root_client, do_client, guest_client, ds_client]: res = client.api.services.user.update( - uid=client.me.id, user_update=user_update_to_root_email + uid=client.account.id, email=default_root_email ) assert isinstance(res, SyftError) assert res.message == "User already exists" -def test_user_view_set_password(worker: Worker, root_client: DomainClient) -> None: - root_client.me.set_password("123", confirm=False) - email = root_client.me.email +def test_user_view_set_password(worker: Worker, root_client: DatasiteClient) -> None: + root_client.account.set_password("123", confirm=False) + email = root_client.account.email # log in again with the wrong password root_client_c = worker.root_client.login(email=email, password="1234") assert isinstance(root_client_c, SyftError) # log in again with the right password root_client_b = worker.root_client.login(email=email, password="123") - assert root_client_b.me == root_client.me + assert root_client_b.account == root_client.account @pytest.mark.parametrize( @@ -270,9 +262,9 @@ def test_user_view_set_password(worker: Worker, root_client: DomainClient) -> No ["syft", "syft.com", "syft@.com"], ) def 
test_user_view_set_invalid_email( - root_client: DomainClient, invalid_email: str + root_client: DatasiteClient, invalid_email: str ) -> None: - result = root_client.me.set_email(invalid_email) + result = root_client.account.set_email(invalid_email) assert isinstance(result, SyftError) @@ -284,72 +276,72 @@ def test_user_view_set_invalid_email( ], ) def test_user_view_set_email_success( - root_client: DomainClient, - ds_client: DomainClient, + root_client: DatasiteClient, + ds_client: DatasiteClient, valid_email_root: str, valid_email_ds: str, ) -> None: - result = root_client.me.set_email(valid_email_root) + result = root_client.account.set_email(valid_email_root) assert isinstance(result, SyftSuccess) - result2 = ds_client.me.set_email(valid_email_ds) + result2 = ds_client.account.set_email(valid_email_ds) assert isinstance(result2, SyftSuccess) def test_user_view_set_default_admin_email_failed( - ds_client: DomainClient, guest_client: DomainClient + ds_client: DatasiteClient, guest_client: DatasiteClient ) -> None: default_root_email = get_default_root_email() - result = ds_client.me.set_email(default_root_email) + result = ds_client.account.set_email(default_root_email) assert isinstance(result, SyftError) assert result.message == "User already exists" - result_2 = guest_client.me.set_email(default_root_email) + result_2 = guest_client.account.set_email(default_root_email) assert isinstance(result_2, SyftError) assert result_2.message == "User already exists" def test_user_view_set_duplicated_email( - root_client: DomainClient, ds_client: DomainClient, guest_client: DomainClient + root_client: DatasiteClient, ds_client: DatasiteClient, guest_client: DatasiteClient ) -> None: - result = ds_client.me.set_email(root_client.me.email) - result2 = guest_client.me.set_email(root_client.me.email) + result = ds_client.account.set_email(root_client.account.email) + result2 = guest_client.account.set_email(root_client.account.email) assert isinstance(result, SyftError) assert result.message == "User already exists" assert isinstance(result2, SyftError) assert result2.message == "User already exists" - result3 = guest_client.me.set_email(ds_client.me.email) + result3 = guest_client.account.set_email(ds_client.account.email) assert isinstance(result3, SyftError) assert result3.message == "User already exists" def test_user_view_update_name_institution_website( - root_client: DomainClient, - ds_client: DomainClient, - guest_client: DomainClient, + root_client: DatasiteClient, + ds_client: DatasiteClient, + guest_client: DatasiteClient, ) -> None: - result = root_client.me.update( + result = root_client.account.update( name="syft", institution="OpenMined", website="https://syft.org" ) assert isinstance(result, SyftSuccess) - assert root_client.me.name == "syft" - assert root_client.me.institution == "OpenMined" - assert root_client.me.website == "https://syft.org" + assert root_client.account.name == "syft" + assert root_client.account.institution == "OpenMined" + assert root_client.account.website == "https://syft.org" - result2 = ds_client.me.update(name="syft2", institution="OpenMined") + result2 = ds_client.account.update(name="syft2", institution="OpenMined") assert isinstance(result2, SyftSuccess) - assert ds_client.me.name == "syft2" - assert ds_client.me.institution == "OpenMined" + assert ds_client.account.name == "syft2" + assert ds_client.account.institution == "OpenMined" - result3 = guest_client.me.update(name="syft3") + result3 = guest_client.account.update(name="syft3") assert 
isinstance(result3, SyftSuccess) - assert guest_client.me.name == "syft3" + assert guest_client.account.name == "syft3" -def test_user_view_set_role(worker: Worker, guest_client: DomainClient) -> None: +def test_user_view_set_role(worker: Worker, guest_client: DatasiteClient) -> None: admin_client = get_mock_client(worker.root_client, ServiceRole.ADMIN) - assert admin_client.me.role == ServiceRole.ADMIN + assert admin_client.account.role == ServiceRole.ADMIN admin_client.register( name="Sheldon Cooper", email="sheldon@caltech.edu", @@ -361,7 +353,7 @@ def test_user_view_set_role(worker: Worker, guest_client: DomainClient) -> None: sheldon = admin_client.users[-1] assert ( sheldon.syft_client_verify_key - == admin_client.me.syft_client_verify_key + == admin_client.account.syft_client_verify_key == admin_client.verify_key ) assert sheldon.role == ServiceRole.DATA_SCIENTIST @@ -369,31 +361,31 @@ def test_user_view_set_role(worker: Worker, guest_client: DomainClient) -> None: assert sheldon.role == ServiceRole.GUEST sheldon.update(role="data_owner") assert sheldon.role == ServiceRole.DATA_OWNER - # the data scientist (Sheldon) log in the domain, he should not + # the data scientist (Sheldon) log in the datasite, he should not # be able to change his role, even if he is a data owner now ds_client = guest_client.login(email="sheldon@caltech.edu", password="changethis") assert ( - ds_client.me.syft_client_verify_key + ds_client.account.syft_client_verify_key == ds_client.verify_key != admin_client.verify_key ) - assert ds_client.me.role == sheldon.role - assert ds_client.me.role == ServiceRole.DATA_OWNER - assert isinstance(ds_client.me.update(role="guest"), SyftError) - assert isinstance(ds_client.me.update(role="data_scientist"), SyftError) + assert ds_client.account.role == sheldon.role + assert ds_client.account.role == ServiceRole.DATA_OWNER + assert isinstance(ds_client.account.update(role="guest"), SyftError) + assert isinstance(ds_client.account.update(role="data_scientist"), SyftError) # now we set sheldon's role to admin. 
Only now he can change his role sheldon.update(role="admin") assert sheldon.role == ServiceRole.ADMIN # QA: this is different than when running in the notebook assert len(ds_client.users.get_all()) == len(admin_client.users.get_all()) - assert isinstance(ds_client.me.update(role="guest"), SyftSuccess) - assert isinstance(ds_client.me.update(role="admin"), SyftError) + assert isinstance(ds_client.account.update(role="guest"), SyftSuccess) + assert isinstance(ds_client.account.update(role="admin"), SyftError) def test_user_view_set_role_admin(faker: Faker) -> None: - node = sy.orchestra.launch(name=token_hex(8), reset=True) - domain_client = node.login(email="info@openmined.org", password="changethis") - domain_client.register( + server = sy.orchestra.launch(name=token_hex(8), reset=True) + datasite_client = server.login(email="info@openmined.org", password="changethis") + datasite_client.register( name="Sheldon Cooper", email="sheldon@caltech.edu", password="changethis", @@ -401,7 +393,7 @@ def test_user_view_set_role_admin(faker: Faker) -> None: institution="Caltech", website="https://www.caltech.edu/", ) - domain_client.register( + datasite_client.register( name="Sheldon Cooper", email="sheldon2@caltech.edu", password="changethis", @@ -409,20 +401,20 @@ def test_user_view_set_role_admin(faker: Faker) -> None: institution="Caltech", website="https://www.caltech.edu/", ) - assert len(domain_client.users.get_all()) == 3 + assert len(datasite_client.users.get_all()) == 3 - domain_client.users[1].update(role="admin") - ds_client = node.login(email="sheldon@caltech.edu", password="changethis") - assert ds_client.me.role == ServiceRole.ADMIN - assert len(ds_client.users.get_all()) == len(domain_client.users.get_all()) + datasite_client.users[1].update(role="admin") + ds_client = server.login(email="sheldon@caltech.edu", password="changethis") + assert ds_client.account.role == ServiceRole.ADMIN + assert len(ds_client.users.get_all()) == len(datasite_client.users.get_all()) - domain_client.users[2].update(role="admin") - ds_client_2 = node.login(email="sheldon2@caltech.edu", password="changethis") - assert ds_client_2.me.role == ServiceRole.ADMIN - assert len(ds_client_2.users.get_all()) == len(domain_client.users.get_all()) + datasite_client.users[2].update(role="admin") + ds_client_2 = server.login(email="sheldon2@caltech.edu", password="changethis") + assert ds_client_2.account.role == ServiceRole.ADMIN + assert len(ds_client_2.users.get_all()) == len(datasite_client.users.get_all()) - node.python_node.cleanup() - node.land() + server.python_server.cleanup() + server.land() @pytest.mark.parametrize( @@ -433,7 +425,9 @@ def test_user_view_set_role_admin(faker: Faker) -> None: ], ) def test_user_search( - root_client: DomainClient, ds_client: DomainClient, search_param: tuple[str, str] + root_client: DatasiteClient, + ds_client: DatasiteClient, + search_param: tuple[str, str], ) -> None: k, attr = search_param v = getattr(ds_client, attr) diff --git a/packages/syft/tests/syft/worker_pool/worker_pool_service_test.py b/packages/syft/tests/syft/worker_pool/worker_pool_service_test.py index 2b65105c160..b5c5ea2f3a3 100644 --- a/packages/syft/tests/syft/worker_pool/worker_pool_service_test.py +++ b/packages/syft/tests/syft/worker_pool/worker_pool_service_test.py @@ -7,7 +7,7 @@ from syft.custom_worker.config import DockerWorkerConfig from syft.custom_worker.config import PrebuiltWorkerConfig from syft.custom_worker.config import WorkerConfig -from syft.node.worker import Worker +from 
syft.server.worker import Worker from syft.service.request.request import CreateCustomWorkerPoolChange from syft.service.response import SyftSuccess from syft.service.worker.worker_image import SyftWorkerImage @@ -16,7 +16,7 @@ # relative from ..request.request_code_accept_deny_test import get_ds_client -PREBUILT_IMAGE_TAG = f"docker.io/openmined/grid-backend:{sy.__version__}" +PREBUILT_IMAGE_TAG = f"docker.io/openmined/syft-backend:{sy.__version__}" CUSTOM_DOCKERFILE = f""" FROM {PREBUILT_IMAGE_TAG} @@ -49,7 +49,7 @@ def test_create_image_and_pool_request_accept( Test the functionality of `SyftWorkerPoolService.create_image_and_pool_request` when the request is accepted """ - # construct a root client and data scientist client for a domain + # construct a root client and data scientist client for a datasite root_client = worker.root_client ds_client = get_ds_client(faker, root_client, worker.guest_client) assert root_client.credentials != ds_client.credentials @@ -104,7 +104,7 @@ def test_create_pool_request_accept( Test the functionality of `SyftWorkerPoolService.create_pool_request` when the request is accepted """ - # construct a root client and data scientist client for a domain + # construct a root client and data scientist client for a datasite root_client = worker.root_client ds_client = get_ds_client(faker, root_client, worker.guest_client) assert root_client.credentials != ds_client.credentials diff --git a/packages/syft/tests/syft/worker_pool/worker_test.py b/packages/syft/tests/syft/worker_pool/worker_test.py index c6935006a61..4da24cb315b 100644 --- a/packages/syft/tests/syft/worker_pool/worker_test.py +++ b/packages/syft/tests/syft/worker_pool/worker_test.py @@ -1,7 +1,7 @@ # syft absolute import syft as sy from syft.custom_worker.config import DockerWorkerConfig -from syft.node.worker import Worker +from syft.server.worker import Worker from syft.service.response import SyftSuccess from syft.service.worker.worker_image import SyftWorkerImage from syft.types.datetime import DateTime @@ -10,7 +10,7 @@ def get_docker_config(): # the DS makes a request to create an image and a pool based on the image custom_dockerfile = f""" - FROM openmined/grid-backend:{sy.__version__} + FROM openmined/syft-backend:{sy.__version__} RUN pip install recordlinkage """ return DockerWorkerConfig(dockerfile=custom_dockerfile) diff --git a/packages/syft/tests/syft/worker_test.py b/packages/syft/tests/syft/worker_test.py index 46ca54963c0..f2ba6eeedcb 100644 --- a/packages/syft/tests/syft/worker_test.py +++ b/packages/syft/tests/syft/worker_test.py @@ -12,10 +12,10 @@ import syft as sy from syft.client.api import SignedSyftAPICall from syft.client.api import SyftAPICall -from syft.node.credentials import SIGNING_KEY_FOR -from syft.node.credentials import SyftSigningKey -from syft.node.credentials import SyftVerifyKey -from syft.node.worker import Worker +from syft.server.credentials import SIGNING_KEY_FOR +from syft.server.credentials import SyftSigningKey +from syft.server.credentials import SyftVerifyKey +from syft.server.worker import Worker from syft.service.action.action_object import ActionObject from syft.service.action.action_store import DictActionStore from syft.service.context import AuthedServiceContext @@ -78,7 +78,7 @@ def test_signing_key() -> None: def test_action_store() -> None: test_signing_key = SyftSigningKey.from_string(test_signing_key_string) - action_store = DictActionStore(node_uid=UID()) + action_store = DictActionStore(server_uid=UID()) uid = UID() raw_data = np.array([1, 
2, 3]) test_object = ActionObject.from_obj(raw_data) @@ -142,10 +142,12 @@ def test_user_service(worker) -> None: ) # create a context - context = AuthedServiceContext(node=worker, credentials=test_signing_key.verify_key) + context = AuthedServiceContext( + server=worker, credentials=test_signing_key.verify_key + ) # call the create function - user_view = user_service.create(context=context, user_create=new_user) + user_view = user_service.create(context=context, **new_user) # get the result assert user_view is not None @@ -256,7 +258,7 @@ def test_worker_handle_api_request( kwargs: dict, blocking: bool, ) -> None: - node_uid = worker_with_proc.id + server_uid = worker_with_proc.id root_client = worker_with_proc.root_client assert root_client.api is not None @@ -266,7 +268,7 @@ def test_worker_handle_api_request( root_client = worker_with_proc.root_client api_call = SyftAPICall( - node_uid=node_uid, path=path, args=[], kwargs=kwargs, blocking=blocking + server_uid=server_uid, path=path, args=[], kwargs=kwargs, blocking=blocking ) # should fail on unsigned requests result = worker_with_proc.handle_api_call(api_call).message.data @@ -308,7 +310,7 @@ def test_worker_handle_api_response( kwargs: dict, blocking: bool, ) -> None: - node_uid = worker_with_proc.id + server_uid = worker_with_proc.id n_processes = worker_with_proc.processes root_client = worker_with_proc.root_client assert root_client.api is not None @@ -326,7 +328,7 @@ def test_worker_handle_api_response( root_client = worker_with_proc.root_client call = SyftAPICall( - node_uid=node_uid, path=path, args=[], kwargs=kwargs, blocking=blocking + server_uid=server_uid, path=path, args=[], kwargs=kwargs, blocking=blocking ) signed_api_call = call.sign(root_client.credentials) diff --git a/packages/syftcli/manifest.yml b/packages/syftcli/manifest.yml index 966706f9b11..cda53aa0678 100644 --- a/packages/syftcli/manifest.yml +++ b/packages/syftcli/manifest.yml @@ -1,11 +1,11 @@ manifestVersion: 1.0 -syftVersion: 0.8.7-beta.13 -dockerTag: 0.8.7-beta.13 +syftVersion: 0.8.8-beta.2 +dockerTag: 0.8.8-beta.2 images: - - docker.io/openmined/grid-frontend:0.8.7-beta.13 - - docker.io/openmined/grid-backend:0.8.7-beta.13 + - docker.io/openmined/syft-frontend:0.8.8-beta.2 + - docker.io/openmined/syft-backend:0.8.8-beta.2 - docker.io/library/mongo:7.0.4 - docker.io/traefik:v2.11.0 diff --git a/scripts/build_images.sh b/scripts/build_images.sh index 3426b5049de..bf7ad7e1823 100644 --- a/scripts/build_images.sh +++ b/scripts/build_images.sh @@ -3,6 +3,6 @@ REGISTRY=${1:-"k3d-registry.localhost:5800"} TAG=${2:-"latest"} -docker image build -f ./packages/grid/backend/backend.dockerfile --target backend -t $REGISTRY/openmined/grid-backend:$TAG ./packages -docker image build -f ./packages/grid/frontend/frontend.dockerfile --target grid-ui-development -t $REGISTRY/openmined/grid-frontend:$TAG ./packages/grid/frontend -docker image build -f ./packages/grid/seaweedfs/seaweedfs.dockerfile -t $REGISTRY/openmined/grid-seaweedfs:$TAG ./packages/grid/seaweedfs +docker image build -f ./packages/grid/backend/backend.dockerfile --target backend -t $REGISTRY/openmined/syft-backend:$TAG ./packages +docker image build -f ./packages/grid/frontend/frontend.dockerfile --target syft-ui-development -t $REGISTRY/openmined/syft-frontend:$TAG ./packages/grid/frontend +docker image build -f ./packages/grid/seaweedfs/seaweedfs.dockerfile -t $REGISTRY/openmined/syft-seaweedfs:$TAG ./packages/grid/seaweedfs diff --git a/scripts/flush_queue.sh b/scripts/flush_queue.sh deleted file 
mode 100755 index fbb2914bc33..00000000000 --- a/scripts/flush_queue.sh +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -docker ps --format '{{.Names}}' | grep "celeryworker" | xargs -I '{}' docker exec -i {} python -c "from grid.core.celery_app import celery_app; celery_app.control.purge();print('Tasks Cleared')" diff --git a/scripts/generate_canonical_names.py b/scripts/generate_canonical_names.py new file mode 100644 index 00000000000..1744c67db15 --- /dev/null +++ b/scripts/generate_canonical_names.py @@ -0,0 +1,126 @@ +# stdlib +import ast +import inspect +import os + +# If variable is set, search for all serializable classes without canonical names +os.environ["SYFT_SEARCH_MISSING_CANONICAL_NAME"] = "true" + +# syft absolute +# NOTE import has to happen after setting the environment variable + +# relative +from ..serde.recursive import SYFT_CLASSES_MISSING_CANONICAL_NAME # noqa: E402 +from ..types.syft_object_registry import SyftObjectRegistry # noqa: E402 + + +class DecoratorFinder(ast.NodeVisitor): + def __init__(self, class_name: str, decorator_name: str): + self.class_name = class_name + self.decorator_name = decorator_name + self.decorator: ast.Call | None = None + + def visit_ClassDef(self, node: ast.ClassDef) -> None: + if node.name == self.class_name: + for decorator in node.decorator_list: + if ( + isinstance(decorator, ast.Call) + and getattr(decorator.func, "id", None) == self.decorator_name + ): + self.decorator = decorator + self.generic_visit(node) + + +def get_class_file_path(cls: type) -> str: + return inspect.getfile(cls) + + +def get_decorator_with_lines( + file_path: str, class_name: str, decorator_name: str +) -> tuple[ast.Call | None, int | None, int | None]: + with open(file_path) as source: + tree = ast.parse(source.read()) + + finder = DecoratorFinder(class_name, decorator_name) + finder.visit(tree) + + if finder.decorator: + start_line = finder.decorator.lineno - 1 + end_line = ( + finder.decorator.end_lineno + if hasattr(finder.decorator, "end_lineno") + else finder.decorator.lineno + ) + return finder.decorator, start_line, end_line + return None, None, None + + +def add_canonical_name_version(decorator: ast.Call, class_name: str) -> ast.Call: + new_decorator = decorator + + canonical_name_exists = any( + kw.arg == "canonical_name" for kw in new_decorator.keywords + ) + version_exists = any(kw.arg == "version" for kw in new_decorator.keywords) + + if not canonical_name_exists: + new_decorator.keywords.append( + ast.keyword(arg="canonical_name", value=ast.Constant(value=class_name)) + ) + if not version_exists: + new_decorator.keywords.append( + ast.keyword(arg="version", value=ast.Constant(value=1)) + ) + + return ast.copy_location(new_decorator, decorator) + + +def update_decorator_for_cls( + cls: type, existing_canonical_names: list[str] +) -> str | None: + file_path = inspect.getfile(cls) + class_name = cls.__name__ + + decorator, start_line, end_line = get_decorator_with_lines( + file_path, class_name, "serializable" + ) + + if decorator is None: + print( + f"{cls.__module__}: Could not find decorator for class {class_name}. Did not update canonical name." + ) + return None + if start_line is None or end_line is None: + print( + f"{cls.__module__}: No start/end lines for decorator in class {class_name}. Did not update canonical name." + ) + return None + + if class_name in existing_canonical_names: + print( + f"{cls.__module__}: {class_name} is already a registered canonical name. Did not update canonical name." 
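As a standalone illustration of the AST technique the new `scripts/generate_canonical_names.py` above relies on (locate a class's `@serializable(...)` decorator, append missing keywords, and `ast.unparse` the result back to source), here is a minimal sketch that operates on an in-memory snippet rather than real Syft source files; the class and decorator names are placeholders:

```python
# Minimal, self-contained demo of the ast-based decorator rewriting used by
# scripts/generate_canonical_names.py. Works on a string instead of a real
# source file; "Thing" and "serializable" here are only placeholders.
import ast

source = """
@serializable(attrs=["id"])
class Thing:
    pass
"""

tree = ast.parse(source)
class_def = next(n for n in ast.walk(tree) if isinstance(n, ast.ClassDef))
decorator = class_def.decorator_list[0]
assert isinstance(decorator, ast.Call)

# Append canonical_name/version keywords only if they are not already present,
# mirroring add_canonical_name_version() in the script.
existing = {kw.arg for kw in decorator.keywords}
if "canonical_name" not in existing:
    decorator.keywords.append(
        ast.keyword(arg="canonical_name", value=ast.Constant(value=class_def.name))
    )
if "version" not in existing:
    decorator.keywords.append(ast.keyword(arg="version", value=ast.Constant(value=1)))

print(ast.unparse(tree))
# @serializable(attrs=['id'], canonical_name='Thing', version=1)
# class Thing:
#     pass
```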
+ ) + return None + + new_decorator = add_canonical_name_version(decorator, class_name) + new_decorator_code = ast.unparse(new_decorator).split("\n") + new_decorator_code[0] = "@" + new_decorator_code[0] + + with open(file_path) as file: + lines = file.readlines() + + lines[start_line:end_line] = [line + "\n" for line in new_decorator_code] + + with open(file_path, "w") as file: + file.writelines(lines) + + print(f"Updated {cls.__module__}.{cls.__name__}") + return class_name + + +def update_canonical_names(): + existing_cnames = list(SyftObjectRegistry.__object_serialization_registry__.keys()) + for cls in SYFT_CLASSES_MISSING_CANONICAL_NAME: + new_name = update_decorator_for_cls(cls, existing_cnames) + if new_name: + existing_cnames.append(new_name) diff --git a/scripts/get_k8s_secret_ci.sh b/scripts/get_k8s_secret_ci.sh index 965e8ff0896..f23bf2048af 100644 --- a/scripts/get_k8s_secret_ci.sh +++ b/scripts/get_k8s_secret_ci.sh @@ -2,5 +2,5 @@ export SYFT_LOGIN_testgateway1_PASSWORD=$(kubectl --context=k3d-testgateway1 get secret backend-secret -n syft \ -o jsonpath='{.data.defaultRootPassword}' | base64 --decode) -export SYFT_LOGIN_testdomain1_PASSWORD=$(kubectl get --context=k3d-testdomain1 secret backend-secret -n syft \ +export SYFT_LOGIN_testdatasite1_PASSWORD=$(kubectl get --context=k3d-testdatasite1 secret backend-secret -n syft \ -o jsonpath='{.data.defaultRootPassword}' | base64 --decode) diff --git a/scripts/patch_hosts.py b/scripts/patch_hosts.py index 56dc0f25185..6dd72c40eb9 100644 --- a/scripts/patch_hosts.py +++ b/scripts/patch_hosts.py @@ -60,29 +60,31 @@ def path(self) -> Path: def read(self) -> str: return self.path.read_text() - def get(self, domain: str) -> list[str]: - return re.findall(f"(.+)\s+{domain}", self.content) + def get(self, datasite: str) -> list[str]: + return re.findall(f"(.+)\s+{datasite}", self.content) - def add(self, ip: str, domain: str) -> None: - if self.get(domain): + def add(self, ip: str, datasite: str) -> None: + if self.get(datasite): return - self.content = self.content.rstrip() + f"\n{ip}\t{domain}" + self.content = self.content.rstrip() + f"\n{ip}\t{datasite}" self.__write() - def remove(self, domain: str) -> None: - if not self.get(domain): + def remove(self, datasite: str) -> None: + if not self.get(datasite): return - self.content = re.sub(f"(.+)\s+{domain}\n", "", self.content) + self.content = re.sub(f"(.+)\s+{datasite}\n", "", self.content) self.__write() - def update(self, ip: str, domain: str) -> None: - if not self.get(domain): - self.add(ip, domain) + def update(self, ip: str, datasite: str) -> None: + if not self.get(datasite): + self.add(ip, datasite) # inplace - self.content = re.sub(f"(.+)\s+{domain}\n", f"{ip}\t{domain}\n", self.content) + self.content = re.sub( + f"(.+)\s+{datasite}\n", f"{ip}\t{datasite}\n", self.content + ) self.__write() def __write(self) -> None: @@ -128,7 +130,7 @@ def main(): nargs=2, action="append", default=[], - metavar=("IP", "DOMAIN"), + metavar=("IP", "DATASITE"), help="Add entry to hosts file", ) parser.add_argument( @@ -178,9 +180,9 @@ def main(): print(">> Hosts file:", hosts.path) if len(args.add): - for ip, domain in args.add: - print(f">> Adding {ip} {domain}") - hosts.update(ip, domain) + for ip, datasite in args.add: + print(f">> Adding {ip} {datasite}") + hosts.update(ip, datasite) if args.add_k3d_registry: print(">> Adding k3d registry host entry") diff --git a/scripts/reset_k8s.sh b/scripts/reset_k8s.sh new file mode 100755 index 00000000000..5da8041d5a3 --- /dev/null +++ 
b/scripts/reset_k8s.sh @@ -0,0 +1,77 @@ +#!/bin/bash + +set -e + +# Default context to current if not provided +CONTEXT=${1:-$(kubectl config current-context)} +NAMESPACE=${2:-syft} + +echo "Resetting Kubernetes resources in context $CONTEXT and namespace $NAMESPACE" + +print_progress() { + echo -e "\033[1;32m[$(date +'%Y-%m-%d %H:%M:%S')] $1\033[0m" +} + +# Set the Kubernetes context +kubectl config use-context $CONTEXT + +# Function to reset a StatefulSet and delete its PVCs +reset_statefulset() { + local statefulset=$1 + local component=$2 + + print_progress "Scaling down $statefulset StatefulSet..." + kubectl scale statefulset $statefulset --replicas=0 -n $NAMESPACE + + print_progress "Deleting PVCs for $statefulset..." + local pvcs=$(kubectl get pvc -l app.kubernetes.io/component=$component -n $NAMESPACE -o jsonpath='{.items[*].metadata.name}') + for pvc in $pvcs; do + kubectl delete pvc $pvc -n $NAMESPACE + done + + print_progress "Scaling up $statefulset StatefulSet..." + kubectl scale statefulset $statefulset --replicas=1 -n $NAMESPACE + + print_progress "Waiting for $statefulset StatefulSet to be ready..." + kubectl rollout status statefulset $statefulset -n $NAMESPACE +} + +# Function to delete a StatefulSet +delete_statefulset() { + local statefulset=$1 + + print_progress "Deleting $statefulset StatefulSet..." + kubectl delete statefulset $statefulset -n $NAMESPACE + +# # Since Default Pool does not have any PVCs, we can skip this step +# print_progress "Deleting PVCs for $statefulset..." +# local pvcs=$(kubectl get pvc -l statefulset.kubernetes.io/pod-name=${statefulset}-0 -n $NAMESPACE -o jsonpath='{.items[*].metadata.name}') +# for pvc in $pvcs; do +# kubectl delete pvc $pvc -n $NAMESPACE +# done + + print_progress "Waiting for $statefulset StatefulSet to be fully deleted..." + kubectl wait --for=delete statefulset/$statefulset -n $NAMESPACE +} + +# Reset MongoDB StatefulSet +reset_statefulset "mongo" "mongo" & + +# Reset SeaweedFS StatefulSet +reset_statefulset "seaweedfs" "seaweedfs" & + +# Wait for MongoDB and SeaweedFS to be reset +wait + +# Delete default-pool StatefulSet +delete_statefulset "default-pool" + +# Restart Backend StatefulSet +print_progress "Restarting backend StatefulSet..." +kubectl scale statefulset backend --replicas=0 -n $NAMESPACE +kubectl scale statefulset backend --replicas=1 -n $NAMESPACE +print_progress "Waiting for backend StatefulSet to be ready..." +kubectl rollout status statefulset backend -n $NAMESPACE + + +print_progress "All operations completed successfully." 
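For completeness, a hedged sketch of driving the same reset cycle from Python (scale a StatefulSet to zero, drop its PVCs, scale it back up, wait for rollout), using only the kubectl invocations the new `scripts/reset_k8s.sh` above already issues; the namespace and StatefulSet names mirror the script's defaults and everything else is illustrative:

```python
# Illustrative only: mirrors reset_statefulset() from scripts/reset_k8s.sh.
# Assumes kubectl is on PATH and pointed at a disposable test cluster.
import subprocess


def kubectl(*args: str) -> str:
    return subprocess.run(
        ["kubectl", *args], check=True, capture_output=True, text=True
    ).stdout


def reset_statefulset(name: str, component: str, namespace: str = "syft") -> None:
    kubectl("scale", "statefulset", name, "--replicas=0", "-n", namespace)
    pvcs = kubectl(
        "get", "pvc",
        "-l", f"app.kubernetes.io/component={component}",
        "-n", namespace,
        "-o", "jsonpath={.items[*].metadata.name}",
    ).split()
    for pvc in pvcs:
        kubectl("delete", "pvc", pvc, "-n", namespace)
    kubectl("scale", "statefulset", name, "--replicas=1", "-n", namespace)
    kubectl("rollout", "status", "statefulset", name, "-n", namespace)


if __name__ == "__main__":
    reset_statefulset("mongo", "mongo")
    reset_statefulset("seaweedfs", "seaweedfs")
```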
diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 9152038b1f7..b0e8fdd58dd 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -8,8 +8,8 @@ # syft absolute import syft as sy -from syft.abstract_node import NodeSideType -from syft.node.worker import Worker +from syft.abstract_server import ServerSideType +from syft.server.worker import Worker def pytest_configure(config: _pytest.config.Config) -> None: @@ -18,7 +18,7 @@ def pytest_configure(config: _pytest.config.Config) -> None: config.addinivalue_line( "markers", "container_workload: container workload integration tests" ) - config.addinivalue_line("markers", "local_node: local node integration tests") + config.addinivalue_line("markers", "local_server: local server integration tests") @pytest.fixture @@ -27,12 +27,12 @@ def gateway_port() -> int: @pytest.fixture -def domain_1_port() -> int: +def datasite_1_port() -> int: return 9082 @pytest.fixture -def domain_2_port() -> int: +def datasite_2_port() -> int: return 9083 @@ -43,8 +43,8 @@ def faker(): @pytest.fixture(scope="function") def full_low_worker(n_consumers: int = 3, create_producer: bool = True) -> Worker: - _node = sy.orchestra.launch( - node_side_type=NodeSideType.LOW_SIDE, + _server = sy.orchestra.launch( + server_side_type=ServerSideType.LOW_SIDE, name=token_hex(8), # dev_mode=True, reset=True, @@ -55,16 +55,16 @@ def full_low_worker(n_consumers: int = 3, create_producer: bool = True) -> Worke thread_workers=False, ) # startup code here - yield _node + yield _server # # Cleanup code - _node.python_node.cleanup() - _node.land() + _server.python_server.cleanup() + _server.land() @pytest.fixture(scope="function") def full_high_worker(n_consumers: int = 3, create_producer: bool = True) -> Worker: - _node = sy.orchestra.launch( - node_side_type=NodeSideType.HIGH_SIDE, + _server = sy.orchestra.launch( + server_side_type=ServerSideType.HIGH_SIDE, name=token_hex(8), # dev_mode=True, reset=True, @@ -75,7 +75,7 @@ def full_high_worker(n_consumers: int = 3, create_producer: bool = True) -> Work thread_workers=False, ) # startup code here - yield _node + yield _server # Cleanup code - _node.python_node.cleanup() - _node.land() + _server.python_server.cleanup() + _server.land() diff --git a/tests/integration/container_workload/blob_storage_test.py b/tests/integration/container_workload/blob_storage_test.py index 3f072eab77c..1f6ab279258 100644 --- a/tests/integration/container_workload/blob_storage_test.py +++ b/tests/integration/container_workload/blob_storage_test.py @@ -14,21 +14,21 @@ reason="AZURE_BLOB_STORAGE_KEY is not set", ) @pytest.mark.container_workload -def test_mount_azure_blob_storage(domain_1_port): - domain_client = sy.login( - email="info@openmined.org", password="changethis", port=domain_1_port +def test_mount_azure_blob_storage(datasite_1_port): + datasite_client = sy.login( + email="info@openmined.org", password="changethis", port=datasite_1_port ) azure_storage_key = os.environ.get("AZURE_BLOB_STORAGE_KEY", None) assert azure_storage_key - domain_client.api.services.blob_storage.mount_azure( + datasite_client.api.services.blob_storage.mount_azure( account_name="citestingstorageaccount", container_name="citestingcontainer", account_key=azure_storage_key, bucket_name="helmazurebucket", ) - blob_files = domain_client.api.services.blob_storage.get_files_from_bucket( + blob_files = datasite_client.api.services.blob_storage.get_files_from_bucket( bucket_name="helmazurebucket" ) assert isinstance(blob_files, 
list), blob_files diff --git a/tests/integration/container_workload/pool_image_test.py b/tests/integration/container_workload/pool_image_test.py index 23fa1d6e8fc..10eab93ad31 100644 --- a/tests/integration/container_workload/pool_image_test.py +++ b/tests/integration/container_workload/pool_image_test.py @@ -8,7 +8,7 @@ # syft absolute import syft as sy -from syft.client.domain_client import DomainClient +from syft.client.datasite_client import DatasiteClient from syft.custom_worker.config import DockerWorkerConfig from syft.custom_worker.config import PrebuiltWorkerConfig from syft.service.request.request import Request @@ -21,7 +21,7 @@ from syft.util.util import get_latest_tag registry = os.getenv("SYFT_BASE_IMAGE_REGISTRY", "docker.io") -repo = "openmined/grid-backend" +repo = "openmined/syft-backend" if "k3d" in registry: tag = get_latest_tag(registry, repo) @@ -34,11 +34,11 @@ @pytest.fixture -def external_registry_uid(domain_1_port: int) -> UID: - domain_client: DomainClient = sy.login( - port=domain_1_port, email="info@openmined.org", password="changethis" +def external_registry_uid(datasite_1_port: int) -> UID: + datasite_client: DatasiteClient = sy.login( + port=datasite_1_port, email="info@openmined.org", password="changethis" ) - image_registry_list = domain_client.api.services.image_registry.get_all() + image_registry_list = datasite_client.api.services.image_registry.get_all() if len(image_registry_list) > 1: raise Exception("Only one registry should be present for testing") @@ -48,13 +48,13 @@ def external_registry_uid(domain_1_port: int) -> UID: ), "External registry different from the one set in the environment variable" return image_registry_list[0].id else: - registry_add_result = domain_client.api.services.image_registry.add( + registry_add_result = datasite_client.api.services.image_registry.add( external_registry ) assert isinstance(registry_add_result, sy.SyftSuccess), str(registry_add_result) - image_registry_list = domain_client.api.services.image_registry.get_all() + image_registry_list = datasite_client.api.services.image_registry.get_all() return image_registry_list[0].id @@ -68,25 +68,25 @@ def make_docker_config_test_case(pkg: str) -> tuple[str, str]: @pytest.mark.container_workload -def test_image_build(domain_1_port: int, external_registry_uid: UID) -> None: - domain_client: DomainClient = sy.login( - port=domain_1_port, email="info@openmined.org", password="changethis" +def test_image_build(datasite_1_port: int, external_registry_uid: UID) -> None: + datasite_client: DatasiteClient = sy.login( + port=datasite_1_port, email="info@openmined.org", password="changethis" ) docker_config, docker_tag = make_docker_config_test_case("recordlinkage") - submit_result = domain_client.api.services.worker_image.submit( + submit_result = datasite_client.api.services.worker_image.submit( worker_config=docker_config ) assert isinstance(submit_result, SyftSuccess) - assert len(domain_client.images.get_all()) == 2 + assert len(datasite_client.images.get_all()) == 2 # Validate if we can get the worker image object from its config - workerimage = domain_client.api.services.worker_image.get_by_config(docker_config) + workerimage = datasite_client.api.services.worker_image.get_by_config(docker_config) assert not isinstance(workerimage, sy.SyftError) # Build docker image - docker_build_result = domain_client.api.services.worker_image.build( + docker_build_result = datasite_client.api.services.worker_image.build( image_uid=workerimage.id, tag=docker_tag, 
registry_uid=external_registry_uid, @@ -94,7 +94,7 @@ def test_image_build(domain_1_port: int, external_registry_uid: UID) -> None: assert isinstance(docker_build_result, SyftSuccess) # Refresh the worker image object - workerimage = domain_client.images.get_by_uid(workerimage.id) + workerimage = datasite_client.images.get_by_uid(workerimage.id) assert not isinstance(workerimage, sy.SyftSuccess) assert workerimage.is_built @@ -107,10 +107,10 @@ def test_image_build(domain_1_port: int, external_registry_uid: UID) -> None: # @pytest.mark.parametrize("prebuilt", [True, False]) @pytest.mark.parametrize("prebuilt", [False]) def test_pool_launch( - domain_1_port: int, external_registry_uid: UID, prebuilt: bool + datasite_1_port: int, external_registry_uid: UID, prebuilt: bool ) -> None: - domain_client: DomainClient = sy.login( - port=domain_1_port, email="info@openmined.org", password="changethis" + datasite_client: DatasiteClient = sy.login( + port=datasite_1_port, email="info@openmined.org", password="changethis" ) # Submit Worker Image @@ -120,12 +120,14 @@ def test_pool_launch( if prebuilt else make_docker_config_test_case("opendp") ) - submit_result = domain_client.api.services.worker_image.submit( + submit_result = datasite_client.api.services.worker_image.submit( worker_config=worker_config ) assert isinstance(submit_result, SyftSuccess) - worker_image = domain_client.api.services.worker_image.get_by_config(worker_config) + worker_image = datasite_client.api.services.worker_image.get_by_config( + worker_config + ) assert not isinstance(worker_image, sy.SyftError) assert worker_image is not None @@ -133,7 +135,7 @@ def test_pool_launch( assert not worker_image.is_built # Build docker image - docker_build_result = domain_client.api.services.worker_image.build( + docker_build_result = datasite_client.api.services.worker_image.build( image_uid=worker_image.id, tag=docker_tag, registry_uid=external_registry_uid, @@ -141,7 +143,7 @@ def test_pool_launch( assert isinstance(docker_build_result, SyftSuccess) # Push Image to External registry - push_result = domain_client.api.services.worker_image.push( + push_result = datasite_client.api.services.worker_image.push( worker_image.id, username=external_registry_username, password=external_registry_password, @@ -150,7 +152,7 @@ def test_pool_launch( # Launch a worker pool worker_pool_name = f"custom-worker-pool-opendp{'-prebuilt' if prebuilt else ''}" - worker_pool_res = domain_client.api.services.worker_pool.launch( + worker_pool_res = datasite_client.api.services.worker_pool.launch( pool_name=worker_pool_name, image_uid=worker_image.id, num_workers=2, @@ -170,7 +172,7 @@ def test_pool_launch( assert all(worker.error is None for worker in worker_pool_res) - worker_pool = domain_client.worker_pools[worker_pool_name] + worker_pool = datasite_client.worker_pools[worker_pool_name] assert len(worker_pool.worker_list) == 2 workers = worker_pool.workers @@ -186,17 +188,17 @@ def test_pool_launch( first_worker = workers[0] # Check worker Logs - logs = domain_client.api.services.worker.logs(uid=first_worker.id) + logs = datasite_client.api.services.worker.logs(uid=first_worker.id) assert not isinstance(logs, sy.SyftError) # Check for worker status - status_res = domain_client.api.services.worker.status(uid=first_worker.id) + status_res = datasite_client.api.services.worker.status(uid=first_worker.id) assert not isinstance(status_res, sy.SyftError) assert isinstance(status_res, tuple) # Delete the pool's workers for worker in worker_pool.workers: - res = 
domain_client.api.services.worker.delete(uid=worker.id, force=True) + res = datasite_client.api.services.worker.delete(uid=worker.id, force=True) assert isinstance(res, sy.SyftSuccess) # TODO: delete the launched pool @@ -205,20 +207,20 @@ def test_pool_launch( @pytest.mark.container_workload @pytest.mark.parametrize("prebuilt", [True, False]) def test_pool_image_creation_job_requests( - domain_1_port: int, external_registry_uid: UID, prebuilt: bool + datasite_1_port: int, external_registry_uid: UID, prebuilt: bool ) -> None: """ Test register ds client, ds requests to create an image and pool creation, do approves, then ds creates a function attached to the worker pool, then creates another request. DO approves and runs the function """ - # construct a root client and data scientist client for the test domain - domain_client: DomainClient = sy.login( - port=domain_1_port, email="info@openmined.org", password="changethis" + # construct a root client and data scientist client for the test datasite + datasite_client: DatasiteClient = sy.login( + port=datasite_1_port, email="info@openmined.org", password="changethis" ) ds_username = uuid4().hex[:8] ds_email = ds_username + "@example.com" - res = domain_client.register( + res = datasite_client.register( name=ds_username, email=ds_email, password="secret_pw", @@ -227,10 +229,10 @@ def test_pool_image_creation_job_requests( assert isinstance(res, SyftSuccess) # Grant user permission to request code execution - ds = next(u for u in domain_client.users if u.email == ds_email) + ds = next(u for u in datasite_client.users if u.email == ds_email) ds.allow_mock_execution() - ds_client = sy.login(email=ds_email, password="secret_pw", port=domain_1_port) + ds_client = sy.login(email=ds_email, password="secret_pw", port=datasite_1_port) # the DS makes a request to create an image and a pool based on the image worker_config, docker_tag = ( @@ -257,9 +259,9 @@ def test_pool_image_creation_job_requests( assert request.changes[1].num_workers == 1 assert request.changes[1].pool_name == worker_pool_name - # the domain client approve the request, so the image should be built + # the datasite client approve the request, so the image should be built # and the worker pool should be launched - for r in domain_client.requests: + for r in datasite_client.requests: if r.id == request.id: req_result = r.approve() break @@ -302,7 +304,7 @@ def custom_worker_func(x): code_request = ds_client.code.request_code_execution(custom_worker_func) assert isinstance(code_request, Request) assert code_request.status.value == 0 # pending - for r in domain_client.requests: + for r in datasite_client.requests: if r.id == code_request.id: code_req_result = r.approve(approve_nested=True) break @@ -313,7 +315,7 @@ def custom_worker_func(x): job.wait() assert job.status.value == "completed" - job = domain_client.jobs.get_by_user_code_id(job.user_code_id)[-1] + job = datasite_client.jobs.get_by_user_code_id(job.user_code_id)[-1] assert job.job_worker_id == worker.id # Validate the result received from the syft function @@ -323,7 +325,7 @@ def custom_worker_func(x): # Delete the workers of the launched pools for worker in launched_pool.workers: - res = domain_client.api.services.worker.delete(uid=worker.id, force=True) + res = datasite_client.api.services.worker.delete(uid=worker.id, force=True) assert isinstance(res, sy.SyftSuccess) # TODO: delete the launched pool diff --git a/tests/integration/frontend/frontend_start_test.py b/tests/integration/frontend/frontend_start_test.py index 
fd531b3b87a..54490db450f 100644 --- a/tests/integration/frontend/frontend_start_test.py +++ b/tests/integration/frontend/frontend_start_test.py @@ -8,14 +8,14 @@ here = os.path.dirname(__file__) NETWORK_PORT = 9081 -DOMAIN_PORT = 9082 +DATASITE_PORT = 9082 HOST_IP = os.environ.get("HOST_IP", "localhost") @pytest.mark.frontend -def test_serves_domain_frontend() -> None: - title_str = "PyGrid" - url = f"http://{HOST_IP}:{DOMAIN_PORT}" +def test_serves_datasite_frontend() -> None: + title_str = "Syft UI" + url = f"http://{HOST_IP}:{DATASITE_PORT}" result = requests.get(url) assert result.status_code == 200 assert title_str in result.text @@ -23,7 +23,7 @@ def test_serves_domain_frontend() -> None: @pytest.mark.frontend def test_serves_network_frontend() -> None: - title_str = "PyGrid" + title_str = "Syft UI" url = f"http://localhost:{NETWORK_PORT}" result = requests.get(url) assert result.status_code == 200 diff --git a/tests/integration/local/enclave_local_test.py b/tests/integration/local/enclave_local_test.py index c91bdf887a6..cdff4630ed1 100644 --- a/tests/integration/local/enclave_local_test.py +++ b/tests/integration/local/enclave_local_test.py @@ -9,16 +9,16 @@ from syft.service.response import SyftError -@pytest.mark.local_node +@pytest.mark.local_server def test_enclave_root_client_exception(): - enclave_node = sy.orchestra.launch( + enclave_server = sy.orchestra.launch( name=token_hex(8), - node_type=sy.NodeType.ENCLAVE, + server_type=sy.ServerType.ENCLAVE, dev_mode=True, reset=True, local_db=True, ) - res = enclave_node.login(email="info@openmined.org", password="changethis") + res = enclave_server.login(email="info@openmined.org", password="changethis") assert isinstance(res, SyftError) - enclave_node.python_node.cleanup() - enclave_node.land() + enclave_server.python_server.cleanup() + enclave_server.land() diff --git a/tests/integration/local/gateway_local_test.py b/tests/integration/local/gateway_local_test.py index ef71e3faf0e..e10e9cb1540 100644 --- a/tests/integration/local/gateway_local_test.py +++ b/tests/integration/local/gateway_local_test.py @@ -9,13 +9,13 @@ # syft absolute import syft as sy -from syft.abstract_node import NodeType -from syft.client.domain_client import DomainClient +from syft.abstract_server import ServerType +from syft.client.datasite_client import DatasiteClient from syft.client.enclave_client import EnclaveClient from syft.client.gateway_client import GatewayClient -from syft.service.network.network_service import NodePeerAssociationStatus -from syft.service.network.node_peer import NodePeer -from syft.service.network.node_peer import NodePeerConnectionStatus +from syft.service.network.network_service import ServerPeerAssociationStatus +from syft.service.network.server_peer import ServerPeer +from syft.service.network.server_peer import ServerPeerConnectionStatus from syft.service.network.utils import PeerHealthCheckTask from syft.service.request.request import Request from syft.service.response import SyftSuccess @@ -23,13 +23,13 @@ def _launch( - node_type: NodeType, + server_type: ServerType, association_request_auto_approval: bool = True, port: int | str | None = None, ): return sy.orchestra.launch( name=token_hex(8), - node_type=node_type, + server_type=server_type, dev_mode=True, reset=True, local_db=True, @@ -41,63 +41,65 @@ def _launch( @pytest.fixture def gateway(): - node = _launch(NodeType.GATEWAY) - yield node - node.python_node.cleanup() - node.land() + server = _launch(ServerType.GATEWAY) + yield server + 
server.python_server.cleanup() + server.land() @pytest.fixture(params=[True, False]) def gateway_association_request_auto_approval(request: pytest.FixtureRequest): - node = _launch(NodeType.GATEWAY, association_request_auto_approval=request.param) - yield (request.param, node) - node.python_node.cleanup() - node.land() + server = _launch( + ServerType.GATEWAY, association_request_auto_approval=request.param + ) + yield (request.param, server) + server.python_server.cleanup() + server.land() @pytest.fixture -def domain(): - node = _launch(NodeType.DOMAIN) - yield node - node.python_node.cleanup() - node.land() +def datasite(): + server = _launch(ServerType.DATASITE) + yield server + server.python_server.cleanup() + server.land() @pytest.fixture -def domain_2(): - node = _launch(NodeType.DOMAIN) - yield node - node.python_node.cleanup() - node.land() +def datasite_2(): + server = _launch(ServerType.DATASITE) + yield server + server.python_server.cleanup() + server.land() @pytest.fixture def enclave(): - node = _launch(NodeType.ENCLAVE) - yield node - node.python_node.cleanup() - node.land() + server = _launch(ServerType.ENCLAVE) + yield server + server.python_server.cleanup() + server.land() @pytest.fixture def gateway_webserver(): - node = _launch(node_type=NodeType.GATEWAY, port="auto") - yield node - node.land() + server = _launch(server_type=ServerType.GATEWAY, port="auto") + yield server + server.land() @pytest.fixture -def domain_webserver(): - node = _launch(NodeType.DOMAIN, port="auto") - yield node - node.land() +def datasite_webserver(): + server = _launch(ServerType.DATASITE, port="auto") + yield server + server.land() @pytest.fixture -def domain_2_webserver(): - node = _launch(NodeType.DOMAIN, port="auto") - yield node - node.land() +def datasite_2_webserver(): + server = _launch(ServerType.DATASITE, port="auto") + yield server + server.land() @pytest.fixture(scope="function") @@ -127,9 +129,12 @@ def set_network_json_env_var(gateway_webserver): del os.environ["NETWORK_REGISTRY_JSON"] -@pytest.mark.local_node +@pytest.mark.local_server def test_create_gateway( - set_network_json_env_var, gateway_webserver, domain_webserver, domain_2_webserver + set_network_json_env_var, + gateway_webserver, + datasite_webserver, + datasite_2_webserver, ): assert isinstance(sy.gateways, sy.NetworkRegistry) assert len(sy.gateways) == 1 @@ -145,34 +150,36 @@ def test_create_gateway( res = gateway_client.settings.allow_association_request_auto_approval(enable=True) assert isinstance(res, SyftSuccess) - domain_client: DomainClient = domain_webserver.login( + datasite_client: DatasiteClient = datasite_webserver.login( email="info@openmined.org", password="changethis", ) - domain_client_2: DomainClient = domain_2_webserver.login( + datasite_client_2: DatasiteClient = datasite_2_webserver.login( email="info@openmined.org", password="changethis", ) - result = domain_client.connect_to_gateway(handle=gateway_webserver) + result = datasite_client.connect_to_gateway(handle=gateway_webserver) assert isinstance(result, SyftSuccess) - result = domain_client_2.connect_to_gateway(handle=gateway_webserver) + result = datasite_client_2.connect_to_gateway(handle=gateway_webserver) assert isinstance(result, SyftSuccess) time.sleep(PeerHealthCheckTask.repeat_time * 2 + 1) - assert len(sy.domains.all_domains) == 2 - assert len(sy.domains.online_domains) == 2 + assert len(sy.datasites.all_datasites) == 2 + assert len(sy.datasites.online_datasites) == 2 # check for peer connection status for peer in 
gateway_client.api.services.network.get_all_peers(): - assert peer.ping_status == NodePeerConnectionStatus.ACTIVE + assert peer.ping_status == ServerPeerConnectionStatus.ACTIVE # check the guest client client = gateway_webserver.client assert isinstance(client, GatewayClient) - assert client.metadata.node_type == NodeType.GATEWAY.value + assert client.metadata.server_type == ServerType.GATEWAY.value -@pytest.mark.local_node -def test_domain_connect_to_gateway(gateway_association_request_auto_approval, domain): +@pytest.mark.local_server +def test_datasite_connect_to_gateway( + gateway_association_request_auto_approval, datasite +): association_request_auto_approval, gateway = ( gateway_association_request_auto_approval ) @@ -180,12 +187,12 @@ def test_domain_connect_to_gateway(gateway_association_request_auto_approval, do email="info@openmined.org", password="changethis", ) - domain_client: DomainClient = domain.login( + datasite_client: DatasiteClient = datasite.login( email="info@openmined.org", password="changethis", ) - result = domain_client.connect_to_gateway(handle=gateway) + result = datasite_client.connect_to_gateway(handle=gateway) if association_request_auto_approval: assert isinstance(result, SyftSuccess) @@ -196,97 +203,100 @@ def test_domain_connect_to_gateway(gateway_association_request_auto_approval, do # check priority all_peers = gateway_client.api.services.network.get_all_peers() - assert all_peers[0].node_routes[0].priority == 1 + assert all_peers[0].server_routes[0].priority == 1 # Try again (via client approach) - result_2 = domain_client.connect_to_gateway(via_client=gateway_client) + result_2 = datasite_client.connect_to_gateway(via_client=gateway_client) assert isinstance(result_2, SyftSuccess) - assert len(domain_client.peers) == 1 + assert len(datasite_client.peers) == 1 assert len(gateway_client.peers) == 1 - proxy_domain_client = gateway_client.peers[0] - domain_peer = domain_client.peers[0] + proxy_datasite_client = gateway_client.peers[0] + datasite_peer = datasite_client.peers[0] - assert isinstance(proxy_domain_client, DomainClient) - assert isinstance(domain_peer, NodePeer) + assert isinstance(proxy_datasite_client, DatasiteClient) + assert isinstance(datasite_peer, ServerPeer) - # Domain's peer is a gateway and vice-versa - assert domain_peer.node_type == NodeType.GATEWAY + # Datasite's peer is a gateway and vice-versa + assert datasite_peer.server_type == ServerType.GATEWAY - assert gateway_client.name == domain_peer.name - assert domain_client.name == proxy_domain_client.name + assert gateway_client.name == datasite_peer.name + assert datasite_client.name == proxy_datasite_client.name - assert len(gateway_client.domains) == 1 + assert len(gateway_client.datasites) == 1 assert len(gateway_client.enclaves) == 0 - assert proxy_domain_client.metadata == domain_client.metadata - assert proxy_domain_client.user_role == ServiceRole.NONE + assert proxy_datasite_client.metadata == datasite_client.metadata + assert proxy_datasite_client.user_role == ServiceRole.NONE - domain_client = domain_client.login( + datasite_client = datasite_client.login( email="info@openmined.org", password="changethis" ) - proxy_domain_client = proxy_domain_client.login( + proxy_datasite_client = proxy_datasite_client.login( email="info@openmined.org", password="changethis" ) - assert proxy_domain_client.logged_in_user == "info@openmined.org" - assert proxy_domain_client.user_role == ServiceRole.ADMIN - assert proxy_domain_client.credentials == domain_client.credentials + assert 
proxy_datasite_client.logged_in_user == "info@openmined.org" + assert proxy_datasite_client.user_role == ServiceRole.ADMIN + assert proxy_datasite_client.credentials == datasite_client.credentials assert ( - proxy_domain_client.api.endpoints.keys() == domain_client.api.endpoints.keys() + proxy_datasite_client.api.endpoints.keys() + == datasite_client.api.endpoints.keys() ) # check priority all_peers = gateway_client.api.services.network.get_all_peers() - assert all_peers[0].node_routes[0].priority == 1 + assert all_peers[0].server_routes[0].priority == 1 -@pytest.mark.local_node -def test_domain_connect_to_gateway_routes_priority(gateway, domain, domain_2) -> None: +@pytest.mark.local_server +def test_datasite_connect_to_gateway_routes_priority( + gateway, datasite, datasite_2 +) -> None: """ - A test for routes' priority (PythonNodeRoute) + A test for routes' priority (PythonServerRoute) """ gateway_client: GatewayClient = gateway.login( email="info@openmined.org", password="changethis", ) - domain_client: DomainClient = domain.login( + datasite_client: DatasiteClient = datasite.login( email="info@openmined.org", password="changethis", ) - result = domain_client.connect_to_gateway(handle=gateway) + result = datasite_client.connect_to_gateway(handle=gateway) assert isinstance(result, SyftSuccess) all_peers = gateway_client.api.services.network.get_all_peers() assert len(all_peers) == 1 - domain_1_routes = all_peers[0].node_routes - assert domain_1_routes[0].priority == 1 + datasite_1_routes = all_peers[0].server_routes + assert datasite_1_routes[0].priority == 1 # reconnect to the gateway - result = domain_client.connect_to_gateway(via_client=gateway_client) + result = datasite_client.connect_to_gateway(via_client=gateway_client) assert isinstance(result, SyftSuccess) all_peers = gateway_client.api.services.network.get_all_peers() assert len(all_peers) == 1 - domain_1_routes = all_peers[0].node_routes - assert domain_1_routes[0].priority == 1 + datasite_1_routes = all_peers[0].server_routes + assert datasite_1_routes[0].priority == 1 - # another domain client connects to the gateway - domain_client_2: DomainClient = domain_2.login( + # another datasite client connects to the gateway + datasite_client_2: DatasiteClient = datasite_2.login( email="info@openmined.org", password="changethis", ) - result = domain_client_2.connect_to_gateway(handle=gateway) + result = datasite_client_2.connect_to_gateway(handle=gateway) assert isinstance(result, SyftSuccess) all_peers = gateway_client.api.services.network.get_all_peers() assert len(all_peers) == 2 for peer in all_peers: - assert peer.node_routes[0].priority == 1 + assert peer.server_routes[0].priority == 1 -@pytest.mark.local_node +@pytest.mark.local_server def test_enclave_connect_to_gateway(faker: Faker, gateway, enclave): gateway_client = gateway.client enclave_client: EnclaveClient = enclave.client @@ -305,15 +315,15 @@ def test_enclave_connect_to_gateway(faker: Faker, gateway, enclave): enclave_peer = enclave_client.peers[0] assert isinstance(proxy_enclave_client, EnclaveClient) - assert isinstance(enclave_peer, NodePeer) + assert isinstance(enclave_peer, ServerPeer) assert gateway_client.name == enclave_peer.name assert enclave_client.name == proxy_enclave_client.name - # Domain's peer is a gateway and vice-versa - assert enclave_peer.node_type == NodeType.GATEWAY + # Datasite's peer is a gateway and vice-versa + assert enclave_peer.server_type == ServerType.GATEWAY - assert len(gateway_client.domains) == 0 + assert 
len(gateway_client.datasites) == 0 assert len(gateway_client.enclaves) == 1 assert proxy_enclave_client.metadata == enclave_client.metadata @@ -341,38 +351,38 @@ def test_enclave_connect_to_gateway(faker: Faker, gateway, enclave): ) -@pytest.mark.local_node +@pytest.mark.local_server @pytest.mark.parametrize( "gateway_association_request_auto_approval", [False], indirect=True ) def test_repeated_association_requests_peers_health_check( - gateway_association_request_auto_approval, domain + gateway_association_request_auto_approval, datasite ): _, gateway = gateway_association_request_auto_approval gateway_client: GatewayClient = gateway.login( email="info@openmined.org", password="changethis", ) - domain_client: DomainClient = domain.login( + datasite_client: DatasiteClient = datasite.login( email="info@openmined.org", password="changethis", ) - result = domain_client.connect_to_gateway(handle=gateway) + result = datasite_client.connect_to_gateway(handle=gateway) assert isinstance(result, Request) - result = domain_client.connect_to_gateway(handle=gateway) + result = datasite_client.connect_to_gateway(handle=gateway) assert isinstance(result, Request) r = gateway_client.api.services.request.get_all()[-1].approve() assert isinstance(r, SyftSuccess) - result = domain_client.connect_to_gateway(handle=gateway) + result = datasite_client.connect_to_gateway(handle=gateway) assert isinstance(result, SyftSuccess) # the gateway client checks that the peer is associated res = gateway_client.api.services.network.check_peer_association( - peer_id=domain_client.id + peer_id=datasite_client.id ) - assert isinstance(res, NodePeerAssociationStatus) + assert isinstance(res, ServerPeerAssociationStatus) assert res.value == "PEER_ASSOCIATED" diff --git a/tests/integration/local/job_test.py b/tests/integration/local/job_test.py index e713da731df..4c56faa516e 100644 --- a/tests/integration/local/job_test.py +++ b/tests/integration/local/job_test.py @@ -2,7 +2,6 @@ # stdlib from secrets import token_hex -import time # third party import pytest @@ -17,7 +16,7 @@ from syft.service.response import SyftSuccess -@pytest.mark.local_node +@pytest.mark.local_server def test_job_restart(job) -> None: job.wait(timeout=2) @@ -61,32 +60,33 @@ def test_job_restart(job) -> None: @pytest.fixture -def node(): - node = sy.orchestra.launch( +def server(): + server = sy.orchestra.launch( name=token_hex(8), dev_mode=False, thread_workers=False, reset=True, n_consumers=4, create_producer=True, - node_side_type=sy.NodeSideType.LOW_SIDE, + server_side_type=sy.ServerSideType.LOW_SIDE, ) try: - yield node + yield server finally: - node.python_node.cleanup() - node.land() + server.python_server.cleanup() + server.land() @pytest.fixture -def job(node): - client = node.login(email="info@openmined.org", password="changethis") +def job(server): + client = server.login(email="info@openmined.org", password="changethis") _ = client.register(name="a", email="aa@b.org", password="c", password_verify="c") - ds_client = node.login(email="aa@b.org", password="c") + ds_client = server.login(email="aa@b.org", password="c") @syft_function() def process_batch(): # stdlib + import time while time.sleep(1) is None: ... 
@@ -94,18 +94,19 @@ def process_batch(): ds_client.code.submit(process_batch) @syft_function_single_use() - def process_all(domain): + def process_all(datasite): # stdlib + import time - _ = domain.launch_job(process_batch) - _ = domain.launch_job(process_batch) + _ = datasite.launch_job(process_batch) + _ = datasite.launch_job(process_batch) while time.sleep(1) is None: ... _ = ds_client.code.request_code_execution(process_all) client.requests[-1].approve(approve_nested=True) - client = node.login(email="info@openmined.org", password="changethis") + client = server.login(email="info@openmined.org", password="changethis") job = client.code.process_all(blocking=False) try: yield job @@ -113,7 +114,7 @@ def process_all(domain): job.kill() -@pytest.mark.local_node +@pytest.mark.local_server def test_job_kill(job) -> None: job.wait(timeout=2) assert wait_until( diff --git a/tests/integration/local/request_multiple_nodes_test.py b/tests/integration/local/request_multiple_nodes_test.py index 4b0a2950c66..c729131afa2 100644 --- a/tests/integration/local/request_multiple_nodes_test.py +++ b/tests/integration/local/request_multiple_nodes_test.py @@ -10,10 +10,10 @@ @pytest.fixture(scope="function") -def node_1(): - node = sy.orchestra.launch( +def server_1(): + server = sy.orchestra.launch( name=token_hex(8), - node_side_type="low", + server_side_type="low", dev_mode=False, reset=True, local_db=True, @@ -21,16 +21,16 @@ def node_1(): n_consumers=1, queue_port=None, ) - yield node - node.python_node.cleanup() - node.land() + yield server + server.python_server.cleanup() + server.land() @pytest.fixture(scope="function") -def node_2(): - node = sy.orchestra.launch( +def server_2(): + server = sy.orchestra.launch( name=token_hex(8), - node_side_type="high", + server_side_type="high", dev_mode=False, reset=True, local_db=True, @@ -38,27 +38,27 @@ def node_2(): n_consumers=1, queue_port=None, ) - yield node - node.python_node.cleanup() - node.land() + yield server + server.python_server.cleanup() + server.land() @pytest.fixture(scope="function") -def client_do_1(node_1): - return node_1.login(email="info@openmined.org", password="changethis") +def client_do_1(server_1): + return server_1.login(email="info@openmined.org", password="changethis") @pytest.fixture(scope="function") -def client_do_2(node_2): - return node_2.login(email="info@openmined.org", password="changethis") +def client_do_2(server_2): + return server_2.login(email="info@openmined.org", password="changethis") @pytest.fixture(scope="function") -def client_ds_1(node_1, client_do_1): +def client_ds_1(server_1, client_do_1): client_do_1.register( name="test_user", email="test@us.er", password="1234", password_verify="1234" ) - return node_1.login(email="test@us.er", password="1234") + return server_1.login(email="test@us.er", password="1234") @pytest.fixture(scope="function") diff --git a/tests/integration/local/syft_function_test.py b/tests/integration/local/syft_function_test.py index 8cc85cce4e2..89dd6326c98 100644 --- a/tests/integration/local/syft_function_test.py +++ b/tests/integration/local/syft_function_test.py @@ -10,13 +10,14 @@ from syft import ActionObject from syft import syft_function from syft import syft_function_single_use +from syft.service.job.job_stash import Job from syft.service.response import SyftError from syft.service.response import SyftSuccess @pytest.fixture -def node(): - _node = sy.orchestra.launch( +def server(): + _server = sy.orchestra.launch( name=token_hex(8), dev_mode=True, reset=True, @@ -26,25 +27,38 @@ 
def node(): local_db=False, ) # startup code here - yield _node + yield _server # Cleanup code - _node.python_node.cleanup() - _node.land() + _server.python_server.cleanup() + _server.land() # @pytest.mark.flaky(reruns=3, reruns_delay=3) @pytest.mark.skipif(sys.platform == "win32", reason="does not run on windows") -def test_nested_jobs(node): - client = node.login(email="info@openmined.org", password="changethis") +@pytest.mark.local_server +def test_nested_jobs(server): + client = server.login(email="info@openmined.org", password="changethis") - res = client.register(name="a", email="aa@b.org", password="c", password_verify="c") + new_user_email = "aa@b.org" + res = client.register( + name="a", email=new_user_email, password="c", password_verify="c" + ) assert isinstance(res, SyftSuccess) - ds_client = node.login(email="aa@b.org", password="c") + ## Dataset x = ActionObject.from_obj([1, 2]) x_ptr = x.send(client) + search_result = [u for u in client.users.get_all() if u.email == new_user_email] + assert len(search_result) == 1 + + new_ds_user = search_result[0] + new_ds_user.allow_mock_execution() + + # Login as data scientist + ds_client = server.login(email=new_user_email, password="c") + ## aggregate function @sy.syft_function() def aggregate_job(job_results): @@ -64,13 +78,13 @@ def process_batch(batch): ## Main function @syft_function_single_use(x=x_ptr) - def process_all(domain, x): + def process_all(datasite, x): job_results = [] for elem in x: - batch_job = domain.launch_job(process_batch, batch=elem) + batch_job = datasite.launch_job(process_batch, batch=elem) job_results += [batch_job.result] - job = domain.launch_job(aggregate_job, job_results=job_results) + job = datasite.launch_job(aggregate_job, job_results=job_results) return job.result assert process_all.worker_pool_name is None @@ -85,7 +99,9 @@ def process_all(domain, x): job = ds_client.code.process_all(x=x_ptr, blocking=False) - job.wait(timeout=0) + assert isinstance(job, Job) + + job.wait(timeout=5) assert len(job.subjobs) == 3 diff --git a/tests/integration/local/syft_worker_deletion_test.py b/tests/integration/local/syft_worker_deletion_test.py index 0094caf1b2c..181030b1a7b 100644 --- a/tests/integration/local/syft_worker_deletion_test.py +++ b/tests/integration/local/syft_worker_deletion_test.py @@ -2,6 +2,7 @@ from collections.abc import Generator from collections.abc import Iterable from itertools import product +import operator from secrets import token_hex import time from typing import Any @@ -12,22 +13,22 @@ # syft absolute import syft as sy -from syft.orchestra import NodeHandle +from syft.orchestra import ServerHandle from syft.service.job.job_stash import JobStatus from syft.service.response import SyftError # equivalent to adding this mark to every test in this file -pytestmark = pytest.mark.local_node +pytestmark = pytest.mark.local_server @pytest.fixture() -def node_args() -> dict[str, Any]: +def server_args() -> dict[str, Any]: return {} @pytest.fixture -def node(node_args: dict[str, Any]) -> Generator[NodeHandle, None, None]: - _node = sy.orchestra.launch( +def server(server_args: dict[str, Any]) -> Generator[ServerHandle, None, None]: + _server = sy.orchestra.launch( **{ "name": token_hex(8), "dev_mode": True, @@ -36,14 +37,14 @@ def node(node_args: dict[str, Any]) -> Generator[NodeHandle, None, None]: "create_producer": True, "queue_port": None, "local_db": False, - **node_args, + **server_args, } ) # startup code here - yield _node + yield _server # Cleanup code - _node.python_node.cleanup() - 
_node.land() + _server.python_server.cleanup() + _server.land() def matrix( @@ -67,37 +68,66 @@ def matrix( return [dict(kvs) for kvs in args] -NODE_ARGS_TEST_CASES = matrix( - n_consumers=[1], - dev_mode=[True, False], - thread_workers=[True, False], -) +SERVER_ARGS_TEST_CASES = { + "n_consumers": [1], + "dev_mode": [True, False], + "thread_workers": [True, False], +} + + +class FlakyMark(RuntimeError): + """To mark a flaky part of a test to use with @pytest.mark.flaky""" + pass -@pytest.mark.parametrize("node_args", NODE_ARGS_TEST_CASES) + +@pytest.mark.flaky(reruns=3, rerun_delay=1, only_rerun=["FlakyMark"]) +@pytest.mark.parametrize( + "server_args", + matrix( + **{**SERVER_ARGS_TEST_CASES, "n_consumers": [3]}, + ), +) @pytest.mark.parametrize("force", [True, False]) -def test_delete_idle_worker(node: NodeHandle, force: bool) -> None: - client = node.login(email="info@openmined.org", password="changethis") - worker = client.worker.get_all()[0] +def test_delete_idle_worker( + server: ServerHandle, force: bool, server_args: dict[str, Any] +) -> None: + client = server.login(email="info@openmined.org", password="changethis") + + original_workers = client.worker.get_all() + worker_to_delete = max(original_workers, key=operator.attrgetter("name")) - res = client.worker.delete(worker.id, force=force) + res = client.worker.delete(worker_to_delete.id, force=force) assert not isinstance(res, SyftError) if force: - assert len(client.worker.get_all()) == 0 + assert ( + len(workers := client.worker.get_all()) == len(original_workers) - 1 + and all(w.id != worker_to_delete.id for w in workers) + ), f"{workers.message=} {server_args=} {[(w.id, w.name) for w in original_workers]}" + return start = time.time() while True: - if len(client.worker.get_all()) == 0: + workers = client.worker.get_all() + if isinstance(workers, SyftError): + raise FlakyMark( + f"`workers = client.worker.get_all()` failed.\n" + f"{workers.message=} {server_args=} {[(w.id, w.name) for w in original_workers]}" + ) + + if len(workers) == len(original_workers) - 1 and all( + w.id != worker_to_delete.id for w in workers + ): break if time.time() - start > 3: raise TimeoutError("Worker did not get removed from stash.") -@pytest.mark.parametrize("node_args", NODE_ARGS_TEST_CASES) +@pytest.mark.parametrize("server_args", matrix(**SERVER_ARGS_TEST_CASES)) @pytest.mark.parametrize("force", [True, False]) -def test_delete_worker(node: NodeHandle, force: bool) -> None: - client = node.login(email="info@openmined.org", password="changethis") +def test_delete_worker(server: ServerHandle, force: bool) -> None: + client = server.login(email="info@openmined.org", password="changethis") data = np.array([1, 2, 3]) data_action_obj = sy.ActionObject.from_obj(data) diff --git a/tests/integration/local/twin_api_sync_test.py b/tests/integration/local/twin_api_sync_test.py index b30b4e50382..77e8dee2e74 100644 --- a/tests/integration/local/twin_api_sync_test.py +++ b/tests/integration/local/twin_api_sync_test.py @@ -7,7 +7,7 @@ # syft absolute import syft import syft as sy -from syft.client.domain_client import DomainClient +from syft.client.datasite_client import DatasiteClient from syft.client.syncing import compare_clients from syft.client.syncing import resolve from syft.service.action.action_object import ActionObject @@ -16,7 +16,7 @@ from syft.service.response import SyftSuccess -def compare_and_resolve(*, from_client: DomainClient, to_client: DomainClient): +def compare_and_resolve(*, from_client: DatasiteClient, to_client: DatasiteClient): 
diff_state_before = compare_clients(from_client, to_client) for obj_diff_batch in diff_state_before.batches: widget = resolve(obj_diff_batch) @@ -35,7 +35,7 @@ def run_and_accept_result(client): return job_high -def get_ds_client(client: DomainClient) -> DomainClient: +def get_ds_client(client: DatasiteClient) -> DatasiteClient: client.register( name="a", email="a@a.com", @@ -56,7 +56,7 @@ def private_function(context) -> str: @pytest.mark.skipif(sys.platform == "win32", reason="does not run on windows") -@pytest.mark.local_node +@pytest.mark.local_server def test_twin_api_integration(full_high_worker, full_low_worker): low_client = full_low_worker.login( email="info@openmined.org", password="changethis" @@ -124,7 +124,7 @@ def compute(query): # verify updating twin api endpoint works timeout_before = ( - full_low_worker.python_node.get_service("apiservice") + full_low_worker.python_server.get_service("apiservice") .stash.get_all( credentials=full_low_worker.client.credentials, has_permission=True ) @@ -141,7 +141,7 @@ def compute(query): assert result, result timeout_after = ( - full_low_worker.python_node.get_service("apiservice") + full_low_worker.python_server.get_service("apiservice") .stash.get_all( credentials=full_low_worker.client.credentials, has_permission=True ) @@ -154,23 +154,23 @@ def compute(query): @pytest.mark.skipif(sys.platform == "win32", reason="does not run on windows") -@pytest.mark.local_node +@pytest.mark.local_server def test_function_error(full_low_worker) -> None: - root_domain_client = full_low_worker.login( + root_datasite_client = full_low_worker.login( email="info@openmined.org", password="changethis" ) - root_domain_client.register( + root_datasite_client.register( name="data-scientist", email="test_user@openmined.org", password="0000", password_verify="0000", ) - ds_client = root_domain_client.login( + ds_client = root_datasite_client.login( email="test_user@openmined.org", password="0000", ) - users = root_domain_client.users.get_all() + users = root_datasite_client.users.get_all() @sy.syft_function_single_use() def compute_sum(): diff --git a/tests/integration/network/client_test.py b/tests/integration/network/client_test.py index 018d66eab90..113ee79ea8d 100644 --- a/tests/integration/network/client_test.py +++ b/tests/integration/network/client_test.py @@ -3,19 +3,19 @@ # syft absolute import syft as sy -from syft.client.domain_client import DomainClient +from syft.client.datasite_client import DatasiteClient from syft.client.gateway_client import GatewayClient -DOMAIN_PORT = 9082 +DATASITE_PORT = 9082 NETWORK_PORT = 9081 @pytest.mark.parametrize( - "node_metadata", [(NETWORK_PORT, GatewayClient), (DOMAIN_PORT, DomainClient)] + "server_metadata", [(NETWORK_PORT, GatewayClient), (DATASITE_PORT, DatasiteClient)] ) @pytest.mark.network -def test_client_type(node_metadata): - port, client_type = node_metadata +def test_client_type(server_metadata): + port, client_type = server_metadata client = sy.login(port=port, email="info@openmined.org", password="changethis") assert isinstance(client, client_type) diff --git a/tests/integration/network/gateway_test.py b/tests/integration/network/gateway_test.py index 48cddf8ce4c..fd4b9751c42 100644 --- a/tests/integration/network/gateway_test.py +++ b/tests/integration/network/gateway_test.py @@ -10,20 +10,20 @@ # syft absolute import syft as sy -from syft.abstract_node import NodeType +from syft.abstract_server import ServerType from syft.client.client import HTTPConnection from syft.client.client import SyftClient 
-from syft.client.domain_client import DomainClient +from syft.client.datasite_client import DatasiteClient from syft.client.gateway_client import GatewayClient from syft.client.registry import NetworkRegistry from syft.client.search import SearchResults from syft.service.dataset.dataset import Dataset from syft.service.network.association_request import AssociationRequestChange -from syft.service.network.network_service import NodePeerAssociationStatus -from syft.service.network.node_peer import NodePeer -from syft.service.network.node_peer import NodePeerConnectionStatus -from syft.service.network.routes import HTTPNodeRoute -from syft.service.network.routes import NodeRouteType +from syft.service.network.network_service import ServerPeerAssociationStatus +from syft.service.network.routes import HTTPServerRoute +from syft.service.network.routes import ServerRouteType +from syft.service.network.server_peer import ServerPeer +from syft.service.network.server_peer import ServerPeerConnectionStatus from syft.service.network.utils import PeerHealthCheckTask from syft.service.request.request import Request from syft.service.response import SyftError @@ -34,7 +34,7 @@ @pytest.fixture(scope="function") def set_env_var( gateway_port: int, - gateway_node: str = "testgateway1", + gateway_server: str = "testgateway1", host_or_ip: str = "localhost", protocol: str = "http", ): @@ -44,7 +44,7 @@ def set_env_var( "2.0.0": {{ "gateways": [ {{ - "name": "{gateway_node}", + "name": "{gateway_server}", "host_or_ip": "{host_or_ip}", "protocol": "{protocol}", "port": {gateway_port}, @@ -68,7 +68,7 @@ def _random_hash() -> str: def _remove_existing_peers(client: SyftClient) -> SyftSuccess | SyftError: - peers: list[NodePeer] | SyftError = client.api.services.network.get_all_peers() + peers: list[ServerPeer] | SyftError = client.api.services.network.get_all_peers() if isinstance(peers, SyftError): return peers for peer in peers: @@ -93,35 +93,35 @@ def test_network_registry_env_var(set_env_var) -> None: @pytest.mark.network -def test_domain_connect_to_gateway( - set_env_var, domain_1_port: int, gateway_port: int +def test_datasite_connect_to_gateway( + set_env_var, datasite_1_port: int, gateway_port: int ) -> None: # check if we can see the online gateways assert isinstance(sy.gateways, NetworkRegistry) assert len(sy.gateways.all_networks) == len(sy.gateways.online_networks) == 1 - # login to the domain and gateway + # login to the datasite and gateway gateway_client: GatewayClient = sy.login( port=gateway_port, email="info@openmined.org", password="changethis" ) - domain_client: DomainClient = sy.login( - port=domain_1_port, email="info@openmined.org", password="changethis" + datasite_client: DatasiteClient = sy.login( + port=datasite_1_port, email="info@openmined.org", password="changethis" ) # Try removing existing peers just to make sure - _remove_existing_peers(domain_client) + _remove_existing_peers(datasite_client) _remove_existing_peers(gateway_client) # Disable automatic acceptance of association requests res = gateway_client.settings.allow_association_request_auto_approval(enable=False) assert isinstance(res, SyftSuccess) - # connecting the domain to the gateway - result = domain_client.connect_to_gateway(gateway_client) + # connecting the datasite to the gateway + result = datasite_client.connect_to_gateway(gateway_client) assert isinstance(result, Request) assert isinstance(result.changes[0], AssociationRequestChange) - assert len(domain_client.peers) == 1 + assert len(datasite_client.peers) == 1 
assert len(gateway_client.peers) == 0 gateway_client_root = gateway_client.login( @@ -133,84 +133,85 @@ def test_domain_connect_to_gateway( assert len(gateway_client.peers) == 1 time.sleep(PeerHealthCheckTask.repeat_time * 2 + 1) - # check that the domain is online on the network - assert len(sy.domains.all_domains) == 1 - assert len(sy.domains.online_domains) == 1 + # check that the datasite is online on the network + assert len(sy.datasites.all_datasites) == 1 + assert len(sy.datasites.online_datasites) == 1 - proxy_domain_client = gateway_client.peers[0] - domain_peer = domain_client.peers[0] + proxy_datasite_client = gateway_client.peers[0] + datasite_peer = datasite_client.peers[0] - assert isinstance(proxy_domain_client, DomainClient) - assert isinstance(domain_peer, NodePeer) + assert isinstance(proxy_datasite_client, DatasiteClient) + assert isinstance(datasite_peer, ServerPeer) - # Domain's peer is a gateway and vice-versa - assert domain_peer.node_type == NodeType.GATEWAY + # Datasite's peer is a gateway and vice-versa + assert datasite_peer.server_type == ServerType.GATEWAY - assert gateway_client.name == domain_peer.name - assert domain_client.name == proxy_domain_client.name + assert gateway_client.name == datasite_peer.name + assert datasite_client.name == proxy_datasite_client.name - assert len(gateway_client.domains) == 1 + assert len(gateway_client.datasites) == 1 assert len(gateway_client.enclaves) == 0 - assert proxy_domain_client.metadata == domain_client.metadata - assert proxy_domain_client.user_role == ServiceRole.NONE + assert proxy_datasite_client.metadata == datasite_client.metadata + assert proxy_datasite_client.user_role == ServiceRole.NONE - domain_client = domain_client.login( + datasite_client = datasite_client.login( email="info@openmined.org", password="changethis" ) - proxy_domain_client = proxy_domain_client.login( + proxy_datasite_client = proxy_datasite_client.login( email="info@openmined.org", password="changethis" ) - assert proxy_domain_client.logged_in_user == "info@openmined.org" - assert proxy_domain_client.user_role == ServiceRole.ADMIN - assert proxy_domain_client.credentials == domain_client.credentials + assert proxy_datasite_client.logged_in_user == "info@openmined.org" + assert proxy_datasite_client.user_role == ServiceRole.ADMIN + assert proxy_datasite_client.credentials == datasite_client.credentials assert ( - proxy_domain_client.api.endpoints.keys() == domain_client.api.endpoints.keys() + proxy_datasite_client.api.endpoints.keys() + == datasite_client.api.endpoints.keys() ) # Remove existing peers - assert isinstance(_remove_existing_peers(domain_client), SyftSuccess) + assert isinstance(_remove_existing_peers(datasite_client), SyftSuccess) assert isinstance(_remove_existing_peers(gateway_client), SyftSuccess) @pytest.mark.network -def test_dataset_search(set_env_var, gateway_port: int, domain_1_port: int) -> None: +def test_dataset_search(set_env_var, gateway_port: int, datasite_1_port: int) -> None: """ - Scenario: Connecting a domain node to a gateway node. The domain + Scenario: Connecting a datasite server to a gateway server. The datasite client then upload a dataset, which should be searchable by the syft network. 
People who install syft can see the mock data and metadata of the uploaded datasets """ - # login to the domain and gateway + # login to the datasite and gateway gateway_client: GatewayClient = sy.login( port=gateway_port, email="info@openmined.org", password="changethis" ) - domain_client: DomainClient = sy.login( - port=domain_1_port, email="info@openmined.org", password="changethis" + datasite_client: DatasiteClient = sy.login( + port=datasite_1_port, email="info@openmined.org", password="changethis" ) # Try removing existing peers just to make sure - _remove_existing_peers(domain_client) + _remove_existing_peers(datasite_client) _remove_existing_peers(gateway_client) res = gateway_client.settings.allow_association_request_auto_approval(enable=True) assert isinstance(res, SyftSuccess) - # connect the domain to the gateway - result = domain_client.connect_to_gateway(gateway_client) + # connect the datasite to the gateway + result = datasite_client.connect_to_gateway(gateway_client) assert isinstance(result, SyftSuccess) - # the domain client uploads a dataset + # the datasite client uploads a dataset input_data = np.array([1, 2, 3]) mock_data = np.array([4, 5, 6]) asset_name = _random_hash() asset = sy.Asset(name=asset_name, data=input_data, mock=mock_data) dataset_name = _random_hash() dataset = sy.Dataset(name=dataset_name, asset_list=[asset]) - dataset_res = domain_client.upload_dataset(dataset) + dataset_res = datasite_client.upload_dataset(dataset) assert isinstance(dataset_res, SyftSuccess) - # since dataset search is done by checking from the online domains, + # since dataset search is done by checking from the online datasites, # we need to wait to make sure peers health check is done time.sleep(PeerHealthCheckTask.repeat_time * 2 + 1) # test if the dataset can be searched by the syft network @@ -227,44 +228,44 @@ def test_dataset_search(set_env_var, gateway_port: int, domain_1_port: int) -> N wrong_search = sy.search(_random_hash()) assert len(wrong_search) == 0 - # the domain client delete the dataset - domain_client.api.services.dataset.delete_by_uid(uid=dataset.id) + # the datasite client delete the dataset + datasite_client.api.services.dataset.delete(uid=dataset.id) # Remove existing peers - assert isinstance(_remove_existing_peers(domain_client), SyftSuccess) + assert isinstance(_remove_existing_peers(datasite_client), SyftSuccess) assert isinstance(_remove_existing_peers(gateway_client), SyftSuccess) @pytest.mark.skip(reason="Possible bug") @pytest.mark.network -def test_domain_gateway_user_code( - set_env_var, domain_1_port: int, gateway_port: int +def test_datasite_gateway_user_code( + set_env_var, datasite_1_port: int, gateway_port: int ) -> None: - # login to the domain and gateway + # login to the datasite and gateway gateway_client: GatewayClient = sy.login( port=gateway_port, email="info@openmined.org", password="changethis" ) - domain_client: DomainClient = sy.login( - port=domain_1_port, email="info@openmined.org", password="changethis" + datasite_client: DatasiteClient = sy.login( + port=datasite_1_port, email="info@openmined.org", password="changethis" ) # Try removing existing peers just to make sure - _remove_existing_peers(domain_client) + _remove_existing_peers(datasite_client) _remove_existing_peers(gateway_client) - # the domain client uploads a dataset + # the datasite client uploads a dataset input_data = np.array([1, 2, 3]) mock_data = np.array([4, 5, 6]) asset_name = _random_hash() asset = sy.Asset(name=asset_name, data=input_data, mock=mock_data) 
dataset_name = _random_hash() dataset = sy.Dataset(name=dataset_name, asset_list=[asset]) - dataset_res = domain_client.upload_dataset(dataset) + dataset_res = datasite_client.upload_dataset(dataset) assert isinstance(dataset_res, SyftSuccess) - # the domain client registers a data data scientist account on its domain + # the datasite client registers a data data scientist account on its datasite random_name: str = str(_random_hash()) - user_create_res = domain_client.register( + user_create_res = datasite_client.register( name=random_name, email=f"{random_name}@caltech.edu", password="changethis", @@ -277,12 +278,12 @@ def test_domain_gateway_user_code( res = gateway_client.settings.allow_association_request_auto_approval(enable=True) assert isinstance(res, SyftSuccess) - # the domain client connects to the gateway - gateway_con_res = domain_client.connect_to_gateway(gateway_client) + # the datasite client connects to the gateway + gateway_con_res = datasite_client.connect_to_gateway(gateway_client) assert isinstance(gateway_con_res, SyftSuccess) - # get the proxy client to the domain, login to the data scientist account - proxy_client = gateway_client.domains[0] + # get the proxy client to the datasite, login to the data scientist account + proxy_client = gateway_client.datasites[0] proxy_ds = proxy_client.login( email=f"{random_name}@caltech.edu", password="changethis", @@ -299,9 +300,9 @@ def mock_function(asset): request_res = proxy_ds.code.request_code_execution(mock_function) assert isinstance(request_res, Request) - # domain client approves the request - assert len(domain_client.requests.get_all()) == 1 - req_approve_res = domain_client.requests[-1].approve() + # datasite client approves the request + assert len(datasite_client.requests.get_all()) == 1 + req_approve_res = datasite_client.requests[-1].approve() assert isinstance(req_approve_res, SyftSuccess) # the proxy data scientist client executes the code and gets the result @@ -309,593 +310,599 @@ def mock_function(asset): final_result = result.get() assert (final_result == input_data + 1).all() - # the domain client delete the dataset - domain_client.api.services.dataset.delete_by_uid(uid=dataset.id) + # the datasite client delete the dataset + datasite_client.api.services.dataset.delete(uid=dataset.id) # Remove existing peers - assert isinstance(_remove_existing_peers(domain_client), SyftSuccess) + assert isinstance(_remove_existing_peers(datasite_client), SyftSuccess) assert isinstance(_remove_existing_peers(gateway_client), SyftSuccess) @pytest.mark.network -def test_deleting_peers(set_env_var, domain_1_port: int, gateway_port: int) -> None: - # login to the domain and gateway +def test_deleting_peers(set_env_var, datasite_1_port: int, gateway_port: int) -> None: + # login to the datasite and gateway gateway_client: GatewayClient = sy.login( port=gateway_port, email="info@openmined.org", password="changethis" ) - domain_client: DomainClient = sy.login( - port=domain_1_port, email="info@openmined.org", password="changethis" + datasite_client: DatasiteClient = sy.login( + port=datasite_1_port, email="info@openmined.org", password="changethis" ) # clean up before test - _remove_existing_peers(domain_client) + _remove_existing_peers(datasite_client) _remove_existing_peers(gateway_client) # Enable automatic acceptance of association requests res = gateway_client.settings.allow_association_request_auto_approval(enable=True) assert isinstance(res, SyftSuccess) - # connecting the domain to the gateway - result = 
domain_client.connect_to_gateway(gateway_client) + # connecting the datasite to the gateway + result = datasite_client.connect_to_gateway(gateway_client) assert isinstance(result, SyftSuccess) - assert len(domain_client.peers) == 1 + assert len(datasite_client.peers) == 1 assert len(gateway_client.peers) == 1 # Remove existing peers - assert isinstance(_remove_existing_peers(domain_client), SyftSuccess) + assert isinstance(_remove_existing_peers(datasite_client), SyftSuccess) assert isinstance(_remove_existing_peers(gateway_client), SyftSuccess) # check that removing peers work as expected - assert len(domain_client.peers) == 0 + assert len(datasite_client.peers) == 0 assert len(gateway_client.peers) == 0 - # check that the online domains and gateways are updated + # check that the online datasites and gateways are updated time.sleep(PeerHealthCheckTask.repeat_time * 2 + 1) assert len(sy.gateways.all_networks) == 1 - assert len(sy.domains.all_domains) == 0 - assert len(sy.domains.online_domains) == 0 + assert len(sy.datasites.all_datasites) == 0 + assert len(sy.datasites.online_datasites) == 0 - # reconnect the domain to the gateway - result = domain_client.connect_to_gateway(gateway_client) + # reconnect the datasite to the gateway + result = datasite_client.connect_to_gateway(gateway_client) assert isinstance(result, SyftSuccess) - assert len(domain_client.peers) == 1 + assert len(datasite_client.peers) == 1 assert len(gateway_client.peers) == 1 # Remove existing peers - assert isinstance(_remove_existing_peers(domain_client), SyftSuccess) + assert isinstance(_remove_existing_peers(datasite_client), SyftSuccess) assert isinstance(_remove_existing_peers(gateway_client), SyftSuccess) # check that removing peers work as expected - assert len(domain_client.peers) == 0 + assert len(datasite_client.peers) == 0 assert len(gateway_client.peers) == 0 @pytest.mark.network -def test_add_route(set_env_var, gateway_port: int, domain_1_port: int) -> None: +def test_add_route(set_env_var, gateway_port: int, datasite_1_port: int) -> None: """ Test the network service's `add_route` functionalities to add routes directly - for a self domain. - Scenario: Connect a domain to a gateway. The gateway adds 2 new routes to the domain + for a self datasite. + Scenario: Connect a datasite to a gateway. The gateway adds 2 new routes to the datasite and check their priorities get updated. - Check for the gateway if the proxy client to connect to the domain uses the + Check for the gateway if the proxy client to connect to the datasite uses the route with the highest priority. 
""" - # login to the domain and gateway + # login to the datasite and gateway gateway_client: GatewayClient = sy.login( port=gateway_port, email="info@openmined.org", password="changethis" ) - domain_client: DomainClient = sy.login( - port=domain_1_port, email="info@openmined.org", password="changethis" + datasite_client: DatasiteClient = sy.login( + port=datasite_1_port, email="info@openmined.org", password="changethis" ) # Try removing existing peers just to make sure - _remove_existing_peers(domain_client) + _remove_existing_peers(datasite_client) _remove_existing_peers(gateway_client) # Enable automatic acceptance of association requests res = gateway_client.settings.allow_association_request_auto_approval(enable=True) assert isinstance(res, SyftSuccess) - # connecting the domain to the gateway - result = domain_client.connect_to_gateway(gateway_client) + # connecting the datasite to the gateway + result = datasite_client.connect_to_gateway(gateway_client) assert isinstance(result, SyftSuccess) - assert len(domain_client.peers) == 1 + assert len(datasite_client.peers) == 1 assert len(gateway_client.peers) == 1 - # add a new route to connect to the domain - new_route = HTTPNodeRoute(host_or_ip="localhost", port=10000) - domain_peer: NodePeer = gateway_client.api.services.network.get_all_peers()[0] + # add a new route to connect to the datasite + new_route = HTTPServerRoute(host_or_ip="localhost", port=10000) + datasite_peer: ServerPeer = gateway_client.api.services.network.get_all_peers()[0] res = gateway_client.api.services.network.add_route( - peer_verify_key=domain_peer.verify_key, route=new_route + peer_verify_key=datasite_peer.verify_key, route=new_route ) assert isinstance(res, SyftSuccess) - domain_peer = gateway_client.api.services.network.get_all_peers()[0] - assert len(domain_peer.node_routes) == 2 - assert domain_peer.node_routes[-1].port == new_route.port + datasite_peer = gateway_client.api.services.network.get_all_peers()[0] + assert len(datasite_peer.server_routes) == 2 + assert datasite_peer.server_routes[-1].port == new_route.port - # adding another route to the domain - new_route2 = HTTPNodeRoute(host_or_ip="localhost", port=10001) + # adding another route to the datasite + new_route2 = HTTPServerRoute(host_or_ip="localhost", port=10001) res = gateway_client.api.services.network.add_route( - peer_verify_key=domain_peer.verify_key, route=new_route2 + peer_verify_key=datasite_peer.verify_key, route=new_route2 ) assert isinstance(res, SyftSuccess) - domain_peer = gateway_client.api.services.network.get_all_peers()[0] - assert len(domain_peer.node_routes) == 3 - assert domain_peer.node_routes[-1].port == new_route2.port - assert domain_peer.node_routes[-1].priority == 3 + datasite_peer = gateway_client.api.services.network.get_all_peers()[0] + assert len(datasite_peer.server_routes) == 3 + assert datasite_peer.server_routes[-1].port == new_route2.port + assert datasite_peer.server_routes[-1].priority == 3 - # add an existed route to the domain. Its priority should not be updated + # add an existed route to the datasite. 
Its priority should not be updated res = gateway_client.api.services.network.add_route( - peer_verify_key=domain_peer.verify_key, route=domain_peer.node_routes[0] + peer_verify_key=datasite_peer.verify_key, route=datasite_peer.server_routes[0] ) assert "route already exists" in res.message assert isinstance(res, SyftSuccess) - domain_peer = gateway_client.api.services.network.get_all_peers()[0] - assert len(domain_peer.node_routes) == 3 - assert domain_peer.node_routes[0].priority == 1 + datasite_peer = gateway_client.api.services.network.get_all_peers()[0] + assert len(datasite_peer.server_routes) == 3 + assert datasite_peer.server_routes[0].priority == 1 # getting the proxy client using the current highest priority route should # be successful since now we pick the oldest route (port 9082 with priority 1) # to have the highest priority by default - proxy_domain_client = gateway_client.peers[0] - assert isinstance(proxy_domain_client, DomainClient) + proxy_datasite_client = gateway_client.peers[0] + assert isinstance(proxy_datasite_client, DatasiteClient) - # the routes the domain client uses to connect to the gateway should stay the same - gateway_peer: NodePeer = domain_client.peers[0] - assert len(gateway_peer.node_routes) == 1 + # the routes the datasite client uses to connect to the gateway should stay the same + gateway_peer: ServerPeer = datasite_client.peers[0] + assert len(gateway_peer.server_routes) == 1 # Remove existing peers - assert isinstance(_remove_existing_peers(domain_client), SyftSuccess) + assert isinstance(_remove_existing_peers(datasite_client), SyftSuccess) assert isinstance(_remove_existing_peers(gateway_client), SyftSuccess) @pytest.mark.network -def test_delete_route(set_env_var, gateway_port: int, domain_1_port: int) -> None: +def test_delete_route(set_env_var, gateway_port: int, datasite_1_port: int) -> None: """ Scenario: - Connect a domain to a gateway. The gateway adds a new route to the domain + Connect a datasite to a gateway. The gateway adds a new route to the datasite and then deletes it. 
""" - # login to the domain and gateway + # login to the datasite and gateway gateway_client: GatewayClient = sy.login( port=gateway_port, email="info@openmined.org", password="changethis" ) - domain_client: DomainClient = sy.login( - port=domain_1_port, email="info@openmined.org", password="changethis" + datasite_client: DatasiteClient = sy.login( + port=datasite_1_port, email="info@openmined.org", password="changethis" ) # Try removing existing peers just to make sure - _remove_existing_peers(domain_client) + _remove_existing_peers(datasite_client) _remove_existing_peers(gateway_client) # Enable automatic acceptance of association requests res = gateway_client.settings.allow_association_request_auto_approval(enable=True) assert isinstance(res, SyftSuccess) - # connecting the domain to the gateway - result = domain_client.connect_to_gateway(gateway_client) + # connecting the datasite to the gateway + result = datasite_client.connect_to_gateway(gateway_client) assert isinstance(result, SyftSuccess) - assert len(domain_client.peers) == 1 + assert len(datasite_client.peers) == 1 assert len(gateway_client.peers) == 1 - # add a new route to connect to the domain - new_route = HTTPNodeRoute(host_or_ip="localhost", port=10000) - domain_peer: NodePeer = gateway_client.api.services.network.get_all_peers()[0] + # add a new route to connect to the datasite + new_route = HTTPServerRoute(host_or_ip="localhost", port=10000) + datasite_peer: ServerPeer = gateway_client.api.services.network.get_all_peers()[0] res = gateway_client.api.services.network.add_route( - peer_verify_key=domain_peer.verify_key, route=new_route + peer_verify_key=datasite_peer.verify_key, route=new_route ) assert isinstance(res, SyftSuccess) - domain_peer = gateway_client.api.services.network.get_all_peers()[0] - assert len(domain_peer.node_routes) == 2 - assert domain_peer.node_routes[-1].port == new_route.port + datasite_peer = gateway_client.api.services.network.get_all_peers()[0] + assert len(datasite_peer.server_routes) == 2 + assert datasite_peer.server_routes[-1].port == new_route.port # delete the added route res = gateway_client.api.services.network.delete_route( - peer_verify_key=domain_peer.verify_key, route=new_route + peer_verify_key=datasite_peer.verify_key, route=new_route ) assert isinstance(res, SyftSuccess) - domain_peer = gateway_client.api.services.network.get_all_peers()[0] - assert len(domain_peer.node_routes) == 1 - assert domain_peer.node_routes[-1].port == domain_1_port + datasite_peer = gateway_client.api.services.network.get_all_peers()[0] + assert len(datasite_peer.server_routes) == 1 + assert datasite_peer.server_routes[-1].port == datasite_1_port # Remove existing peers - assert isinstance(_remove_existing_peers(domain_client), SyftSuccess) + assert isinstance(_remove_existing_peers(datasite_client), SyftSuccess) assert isinstance(_remove_existing_peers(gateway_client), SyftSuccess) @pytest.mark.network -def test_add_route_on_peer(set_env_var, gateway_port: int, domain_1_port: int) -> None: +def test_add_route_on_peer( + set_env_var, gateway_port: int, datasite_1_port: int +) -> None: """ Test the `add_route_on_peer` of network service. - Connect a domain to a gateway. - The gateway adds 2 new routes for itself remotely on the domain and check their priorities. - Then the domain adds a route to itself for the gateway. + Connect a datasite to a gateway. + The gateway adds 2 new routes for itself remotely on the datasite and check their priorities. 
+ Then the datasite adds a route to itself for the gateway. """ - # login to the domain and gateway + # login to the datasite and gateway gateway_client: GatewayClient = sy.login( port=gateway_port, email="info@openmined.org", password="changethis" ) - domain_client: DomainClient = sy.login( - port=domain_1_port, email="info@openmined.org", password="changethis" + datasite_client: DatasiteClient = sy.login( + port=datasite_1_port, email="info@openmined.org", password="changethis" ) # Remove existing peers - _remove_existing_peers(domain_client) + _remove_existing_peers(datasite_client) _remove_existing_peers(gateway_client) # Enable automatic acceptance of association requests res = gateway_client.settings.allow_association_request_auto_approval(enable=True) assert isinstance(res, SyftSuccess) - # connecting the domain to the gateway - result = domain_client.connect_to_gateway(gateway_client) + # connecting the datasite to the gateway + result = datasite_client.connect_to_gateway(gateway_client) assert isinstance(result, SyftSuccess) - assert len(domain_client.peers) == 1 + assert len(datasite_client.peers) == 1 assert len(gateway_client.peers) == 1 - gateway_peer: NodePeer = domain_client.peers[0] - assert len(gateway_peer.node_routes) == 1 - assert gateway_peer.node_routes[-1].priority == 1 + gateway_peer: ServerPeer = datasite_client.peers[0] + assert len(gateway_peer.server_routes) == 1 + assert gateway_peer.server_routes[-1].priority == 1 - # adding a new route for the domain - new_route = HTTPNodeRoute(host_or_ip="localhost", port=10000) - domain_peer: NodePeer = gateway_client.api.services.network.get_all_peers()[0] + # adding a new route for the datasite + new_route = HTTPServerRoute(host_or_ip="localhost", port=10000) + datasite_peer: ServerPeer = gateway_client.api.services.network.get_all_peers()[0] res = gateway_client.api.services.network.add_route_on_peer( - peer=domain_peer, route=new_route + peer=datasite_peer, route=new_route ) assert isinstance(res, SyftSuccess) - gateway_peer = domain_client.api.services.network.get_all_peers()[0] - assert len(gateway_peer.node_routes) == 2 - assert gateway_peer.node_routes[-1].port == new_route.port - assert gateway_peer.node_routes[-1].priority == 2 + gateway_peer = datasite_client.api.services.network.get_all_peers()[0] + assert len(gateway_peer.server_routes) == 2 + assert gateway_peer.server_routes[-1].port == new_route.port + assert gateway_peer.server_routes[-1].priority == 2 - # adding another route for the domain - new_route2 = HTTPNodeRoute(host_or_ip="localhost", port=10001) + # adding another route for the datasite + new_route2 = HTTPServerRoute(host_or_ip="localhost", port=10001) res = gateway_client.api.services.network.add_route_on_peer( - peer=domain_peer, route=new_route2 + peer=datasite_peer, route=new_route2 ) assert isinstance(res, SyftSuccess) - gateway_peer = domain_client.api.services.network.get_all_peers()[0] - assert len(gateway_peer.node_routes) == 3 - assert gateway_peer.node_routes[-1].port == new_route2.port - assert gateway_peer.node_routes[-1].priority == 3 + gateway_peer = datasite_client.api.services.network.get_all_peers()[0] + assert len(gateway_peer.server_routes) == 3 + assert gateway_peer.server_routes[-1].port == new_route2.port + assert gateway_peer.server_routes[-1].priority == 3 - # the domain calls `add_route_on_peer` to to add a route to itself for the gateway - assert len(domain_peer.node_routes) == 1 - res = domain_client.api.services.network.add_route_on_peer( - peer=domain_client.peers[0], 
route=new_route + # the datasite calls `add_route_on_peer` to to add a route to itself for the gateway + assert len(datasite_peer.server_routes) == 1 + res = datasite_client.api.services.network.add_route_on_peer( + peer=datasite_client.peers[0], route=new_route ) assert isinstance(res, SyftSuccess) - domain_peer = gateway_client.api.services.network.get_all_peers()[0] - assert domain_peer.node_routes[-1].port == new_route.port - assert len(domain_peer.node_routes) == 2 + datasite_peer = gateway_client.api.services.network.get_all_peers()[0] + assert datasite_peer.server_routes[-1].port == new_route.port + assert len(datasite_peer.server_routes) == 2 # Remove existing peers - assert isinstance(_remove_existing_peers(domain_client), SyftSuccess) + assert isinstance(_remove_existing_peers(datasite_client), SyftSuccess) assert isinstance(_remove_existing_peers(gateway_client), SyftSuccess) @pytest.mark.network @pytest.mark.flaky(reruns=2, reruns_delay=2) def test_delete_route_on_peer( - set_env_var, gateway_port: int, domain_1_port: int + set_env_var, gateway_port: int, datasite_1_port: int ) -> None: """ - Connect a domain to a gateway, the gateway adds 2 new routes for the domain + Connect a datasite to a gateway, the gateway adds 2 new routes for the datasite , then delete them. """ - # login to the domain and gateway + # login to the datasite and gateway gateway_client: GatewayClient = sy.login( port=gateway_port, email="info@openmined.org", password="changethis" ) - domain_client: DomainClient = sy.login( - port=domain_1_port, email="info@openmined.org", password="changethis" + datasite_client: DatasiteClient = sy.login( + port=datasite_1_port, email="info@openmined.org", password="changethis" ) # Remove existing peers - _remove_existing_peers(domain_client) + _remove_existing_peers(datasite_client) _remove_existing_peers(gateway_client) # Enable automatic acceptance of association requests res = gateway_client.settings.allow_association_request_auto_approval(enable=True) assert isinstance(res, SyftSuccess) - # connecting the domain to the gateway - result = domain_client.connect_to_gateway(gateway_client) + # connecting the datasite to the gateway + result = datasite_client.connect_to_gateway(gateway_client) assert isinstance(result, SyftSuccess) - # gateway adds 2 new routes for the domain - new_route = HTTPNodeRoute(host_or_ip="localhost", port=10000) - new_route2 = HTTPNodeRoute(host_or_ip="localhost", port=10001) - domain_peer: NodePeer = gateway_client.api.services.network.get_all_peers()[0] + # gateway adds 2 new routes for the datasite + new_route = HTTPServerRoute(host_or_ip="localhost", port=10000) + new_route2 = HTTPServerRoute(host_or_ip="localhost", port=10001) + datasite_peer: ServerPeer = gateway_client.api.services.network.get_all_peers()[0] res = gateway_client.api.services.network.add_route_on_peer( - peer=domain_peer, route=new_route + peer=datasite_peer, route=new_route ) assert isinstance(res, SyftSuccess) res = gateway_client.api.services.network.add_route_on_peer( - peer=domain_peer, route=new_route2 + peer=datasite_peer, route=new_route2 ) assert isinstance(res, SyftSuccess) - gateway_peer: NodePeer = domain_client.peers[0] - assert len(gateway_peer.node_routes) == 3 + gateway_peer: ServerPeer = datasite_client.peers[0] + assert len(gateway_peer.server_routes) == 3 - # gateway delete the routes for the domain + # gateway delete the routes for the datasite res = gateway_client.api.services.network.delete_route_on_peer( - peer=domain_peer, route=new_route + 
peer=datasite_peer, route=new_route ) assert isinstance(res, SyftSuccess) - gateway_peer = domain_client.peers[0] - assert len(gateway_peer.node_routes) == 2 + gateway_peer = datasite_client.peers[0] + assert len(gateway_peer.server_routes) == 2 res = gateway_client.api.services.network.delete_route_on_peer( - peer=domain_peer, route=new_route2 + peer=datasite_peer, route=new_route2 ) assert isinstance(res, SyftSuccess) - gateway_peer = domain_client.peers[0] - assert len(gateway_peer.node_routes) == 1 + gateway_peer = datasite_client.peers[0] + assert len(gateway_peer.server_routes) == 1 - # gateway deletes the last the route to it for the domain - last_route: NodeRouteType = gateway_peer.node_routes[0] + # gateway deletes the last the route to it for the datasite + last_route: ServerRouteType = gateway_peer.server_routes[0] res = gateway_client.api.services.network.delete_route_on_peer( - peer=domain_peer, route=last_route + peer=datasite_peer, route=last_route ) assert isinstance(res, SyftSuccess) assert "There is no routes left" in res.message - assert len(domain_client.peers) == 0 # gateway is no longer a peer of the domain + assert ( + len(datasite_client.peers) == 0 + ) # gateway is no longer a peer of the datasite - # The gateway client also removes the domain as a peer + # The gateway client also removes the datasite as a peer assert isinstance(_remove_existing_peers(gateway_client), SyftSuccess) @pytest.mark.network def test_update_route_priority( - set_env_var, gateway_port: int, domain_1_port: int + set_env_var, gateway_port: int, datasite_1_port: int ) -> None: - # login to the domain and gateway + # login to the datasite and gateway gateway_client: GatewayClient = sy.login( port=gateway_port, email="info@openmined.org", password="changethis" ) - domain_client: DomainClient = sy.login( - port=domain_1_port, email="info@openmined.org", password="changethis" + datasite_client: DatasiteClient = sy.login( + port=datasite_1_port, email="info@openmined.org", password="changethis" ) # Try remove existing peers - _remove_existing_peers(domain_client) + _remove_existing_peers(datasite_client) _remove_existing_peers(gateway_client) # Enable automatic acceptance of association requests res = gateway_client.settings.allow_association_request_auto_approval(enable=True) assert isinstance(res, SyftSuccess) - # connecting the domain to the gateway - result = domain_client.connect_to_gateway(gateway_client) + # connecting the datasite to the gateway + result = datasite_client.connect_to_gateway(gateway_client) assert isinstance(result, SyftSuccess) - # gateway adds 2 new routes to the domain - new_route = HTTPNodeRoute(host_or_ip="localhost", port=10000) - new_route2 = HTTPNodeRoute(host_or_ip="localhost", port=10001) - domain_peer: NodePeer = gateway_client.api.services.network.get_all_peers()[0] + # gateway adds 2 new routes to the datasite + new_route = HTTPServerRoute(host_or_ip="localhost", port=10000) + new_route2 = HTTPServerRoute(host_or_ip="localhost", port=10001) + datasite_peer: ServerPeer = gateway_client.api.services.network.get_all_peers()[0] res = gateway_client.api.services.network.add_route( - peer_verify_key=domain_peer.verify_key, route=new_route + peer_verify_key=datasite_peer.verify_key, route=new_route ) assert isinstance(res, SyftSuccess) res = gateway_client.api.services.network.add_route( - peer_verify_key=domain_peer.verify_key, route=new_route2 + peer_verify_key=datasite_peer.verify_key, route=new_route2 ) assert isinstance(res, SyftSuccess) # check if the priorities 
of the routes are correct - domain_peer = gateway_client.api.services.network.get_all_peers()[0] + datasite_peer = gateway_client.api.services.network.get_all_peers()[0] routes_port_priority: dict = { - route.port: route.priority for route in domain_peer.node_routes + route.port: route.priority for route in datasite_peer.server_routes } - assert routes_port_priority[domain_1_port] == 1 + assert routes_port_priority[datasite_1_port] == 1 assert routes_port_priority[new_route.port] == 2 assert routes_port_priority[new_route2.port] == 3 # update the priorities for the routes res = gateway_client.api.services.network.update_route_priority( - peer_verify_key=domain_peer.verify_key, route=new_route, priority=5 + peer_verify_key=datasite_peer.verify_key, route=new_route, priority=5 ) assert isinstance(res, SyftSuccess) - domain_peer = gateway_client.api.services.network.get_all_peers()[0] + datasite_peer = gateway_client.api.services.network.get_all_peers()[0] routes_port_priority: dict = { - route.port: route.priority for route in domain_peer.node_routes + route.port: route.priority for route in datasite_peer.server_routes } assert routes_port_priority[new_route.port] == 5 # if we don't specify `priority`, the route will be automatically updated # to have the biggest priority value among all routes res = gateway_client.api.services.network.update_route_priority( - peer_verify_key=domain_peer.verify_key, route=new_route2 + peer_verify_key=datasite_peer.verify_key, route=new_route2 ) assert isinstance(res, SyftSuccess) - domain_peer = gateway_client.api.services.network.get_all_peers()[0] + datasite_peer = gateway_client.api.services.network.get_all_peers()[0] routes_port_priority: dict = { - route.port: route.priority for route in domain_peer.node_routes + route.port: route.priority for route in datasite_peer.server_routes } assert routes_port_priority[new_route2.port] == 6 # Remove existing peers - assert isinstance(_remove_existing_peers(domain_client), SyftSuccess) + assert isinstance(_remove_existing_peers(datasite_client), SyftSuccess) assert isinstance(_remove_existing_peers(gateway_client), SyftSuccess) @pytest.mark.network def test_update_route_priority_on_peer( - set_env_var, gateway_port: int, domain_1_port: int + set_env_var, gateway_port: int, datasite_1_port: int ) -> None: - # login to the domain and gateway + # login to the datasite and gateway gateway_client: GatewayClient = sy.login( port=gateway_port, email="info@openmined.org", password="changethis" ) - domain_client: DomainClient = sy.login( - port=domain_1_port, email="info@openmined.org", password="changethis" + datasite_client: DatasiteClient = sy.login( + port=datasite_1_port, email="info@openmined.org", password="changethis" ) # Remove existing peers - _remove_existing_peers(domain_client) + _remove_existing_peers(datasite_client) _remove_existing_peers(gateway_client) # Enable automatic acceptance of association requests res = gateway_client.settings.allow_association_request_auto_approval(enable=True) assert isinstance(res, SyftSuccess) - # connecting the domain to the gateway - result = domain_client.connect_to_gateway(gateway_client) + # connecting the datasite to the gateway + result = datasite_client.connect_to_gateway(gateway_client) assert isinstance(result, SyftSuccess) - # gateway adds 2 new routes to itself remotely on the domain node - domain_peer: NodePeer = gateway_client.api.services.network.get_all_peers()[0] - new_route = HTTPNodeRoute(host_or_ip="localhost", port=10000) + # gateway adds 2 new routes to 
itself remotely on the datasite server + datasite_peer: ServerPeer = gateway_client.api.services.network.get_all_peers()[0] + new_route = HTTPServerRoute(host_or_ip="localhost", port=10000) res = gateway_client.api.services.network.add_route_on_peer( - peer=domain_peer, route=new_route + peer=datasite_peer, route=new_route ) assert isinstance(res, SyftSuccess) - new_route2 = HTTPNodeRoute(host_or_ip="localhost", port=10001) + new_route2 = HTTPServerRoute(host_or_ip="localhost", port=10001) res = gateway_client.api.services.network.add_route_on_peer( - peer=domain_peer, route=new_route2 + peer=datasite_peer, route=new_route2 ) assert isinstance(res, SyftSuccess) # check if the priorities of the routes are correct - gateway_peer = domain_client.api.services.network.get_all_peers()[0] + gateway_peer = datasite_client.api.services.network.get_all_peers()[0] routes_port_priority: dict = { - route.port: route.priority for route in gateway_peer.node_routes + route.port: route.priority for route in gateway_peer.server_routes } assert routes_port_priority[gateway_port] == 1 assert routes_port_priority[new_route.port] == 2 assert routes_port_priority[new_route2.port] == 3 - # gateway updates the route priorities for the domain remotely + # gateway updates the route priorities for the datasite remotely res = gateway_client.api.services.network.update_route_priority_on_peer( - peer=domain_peer, route=new_route, priority=5 + peer=datasite_peer, route=new_route, priority=5 ) assert isinstance(res, SyftSuccess) res = gateway_client.api.services.network.update_route_priority_on_peer( - peer=domain_peer, route=new_route2 + peer=datasite_peer, route=new_route2 ) assert isinstance(res, SyftSuccess) - gateway_peer = domain_client.api.services.network.get_all_peers()[0] + gateway_peer = datasite_client.api.services.network.get_all_peers()[0] routes_port_priority: dict = { - route.port: route.priority for route in gateway_peer.node_routes + route.port: route.priority for route in gateway_peer.server_routes } assert routes_port_priority[new_route.port] == 5 assert routes_port_priority[new_route2.port] == 6 # Remove existing peers - assert isinstance(_remove_existing_peers(domain_client), SyftSuccess) + assert isinstance(_remove_existing_peers(datasite_client), SyftSuccess) assert isinstance(_remove_existing_peers(gateway_client), SyftSuccess) @pytest.mark.network -def test_dataset_stream(set_env_var, gateway_port: int, domain_1_port: int) -> None: +def test_dataset_stream(set_env_var, gateway_port: int, datasite_1_port: int) -> None: """ - Scenario: Connecting a domain node to a gateway node. The domain + Scenario: Connecting a datasite server to a gateway server. The datasite client then upload a dataset, which should be searchable by the syft network. 
People who install syft can see the mock data and metadata of the uploaded datasets """ - # login to the domain and gateway + # login to the datasite and gateway gateway_client: GatewayClient = sy.login( port=gateway_port, email="info@openmined.org", password="changethis" ) - domain_client: DomainClient = sy.login( - port=domain_1_port, email="info@openmined.org", password="changethis" + datasite_client: DatasiteClient = sy.login( + port=datasite_1_port, email="info@openmined.org", password="changethis" ) # Remove existing peers just to make sure - _remove_existing_peers(domain_client) + _remove_existing_peers(datasite_client) _remove_existing_peers(gateway_client) res = gateway_client.settings.allow_association_request_auto_approval(enable=True) assert isinstance(res, SyftSuccess) - # connect the domain to the gateway - result = domain_client.connect_to_gateway(gateway_client) + # connect the datasite to the gateway + result = datasite_client.connect_to_gateway(gateway_client) assert isinstance(result, SyftSuccess) - # the domain client uploads a dataset + # the datasite client uploads a dataset input_data = np.array([1, 2, 3]) mock_data = np.array([4, 5, 6]) asset_name = _random_hash() asset = sy.Asset(name=asset_name, data=input_data, mock=mock_data) dataset_name = _random_hash() dataset = sy.Dataset(name=dataset_name, asset_list=[asset]) - dataset_res = domain_client.upload_dataset(dataset) + dataset_res = datasite_client.upload_dataset(dataset) assert isinstance(dataset_res, SyftSuccess) - domain_proxy_client = next( - gateway_client.domains[i] + datasite_proxy_client = next( + gateway_client.datasites[i] for i in itertools.count() - if gateway_client.domains[i].name == domain_client.name + if gateway_client.datasites[i].name == datasite_client.name ) - root_proxy_client = domain_proxy_client.login( + root_proxy_client = datasite_proxy_client.login( email="info@openmined.org", password="changethis" ) retrieved_dataset = root_proxy_client.datasets[dataset_name] retrieved_asset = retrieved_dataset.assets[asset_name] assert np.all(retrieved_asset.data == input_data) - # the domain client delete the dataset - domain_client.api.services.dataset.delete_by_uid(uid=retrieved_dataset.id) + # the datasite client deletes the dataset + datasite_client.api.services.dataset.delete(uid=retrieved_dataset.id) # Remove existing peers - assert isinstance(_remove_existing_peers(domain_client), SyftSuccess) + assert isinstance(_remove_existing_peers(datasite_client), SyftSuccess) assert isinstance(_remove_existing_peers(gateway_client), SyftSuccess) @pytest.mark.network -def test_peer_health_check(set_env_var, gateway_port: int, domain_1_port: int) -> None: +def test_peer_health_check( + set_env_var, gateway_port: int, datasite_1_port: int +) -> None: """ - Scenario: Connecting a domain node to a gateway node. + Scenario: Connecting a datasite server to a gateway server. The gateway client approves the association request.
- The gateway client checks that the domain peer is associated + The gateway client checks that the datasite peer is associated """ - # login to the domain and gateway + # login to the datasite and gateway gateway_client: GatewayClient = sy.login( port=gateway_port, email="info@openmined.org", password="changethis" ) - domain_client: DomainClient = sy.login( - port=domain_1_port, email="info@openmined.org", password="changethis" + datasite_client: DatasiteClient = sy.login( + port=datasite_1_port, email="info@openmined.org", password="changethis" ) res = gateway_client.settings.allow_association_request_auto_approval(enable=False) assert isinstance(res, SyftSuccess) # Try removing existing peers just to make sure - _remove_existing_peers(domain_client) + _remove_existing_peers(datasite_client) _remove_existing_peers(gateway_client) - # gateway checks that the domain is not yet associated + # gateway checks that the datasite is not yet associated res = gateway_client.api.services.network.check_peer_association( - peer_id=domain_client.id + peer_id=datasite_client.id ) - assert isinstance(res, NodePeerAssociationStatus) + assert isinstance(res, ServerPeerAssociationStatus) assert res.value == "PEER_NOT_FOUND" - # the domain tries to connect to the gateway - result = domain_client.connect_to_gateway(gateway_client) + # the datasite tries to connect to the gateway + result = datasite_client.connect_to_gateway(gateway_client) assert isinstance(result, Request) assert isinstance(result.changes[0], AssociationRequestChange) # check that the peer's association request is pending res = gateway_client.api.services.network.check_peer_association( - peer_id=domain_client.id + peer_id=datasite_client.id ) - assert isinstance(res, NodePeerAssociationStatus) + assert isinstance(res, ServerPeerAssociationStatus) assert res.value == "PEER_ASSOCIATION_PENDING" - # the domain tries to connect to the gateway (again) - result = domain_client.connect_to_gateway(gateway_client) + # the datasite tries to connect to the gateway (again) + result = datasite_client.connect_to_gateway(gateway_client) assert isinstance(result, Request) # the pending request is returned - # there should be only 1 association requests from the domain + # there should be only 1 association request from the datasite assert len(gateway_client.api.services.request.get_all()) == 1 # check again that the peer's association request is still pending res = gateway_client.api.services.network.check_peer_association( - peer_id=domain_client.id + peer_id=datasite_client.id ) - assert isinstance(res, NodePeerAssociationStatus) + assert isinstance(res, ServerPeerAssociationStatus) assert res.value == "PEER_ASSOCIATION_PENDING" # the gateway client approves one of the association requests @@ -905,49 +912,49 @@ def test_peer_health_check(set_env_var, gateway_port: int, domain_1_port: int) - # the gateway client checks that the peer is associated res = gateway_client.api.services.network.check_peer_association( - peer_id=domain_client.id + peer_id=datasite_client.id ) - assert isinstance(res, NodePeerAssociationStatus) + assert isinstance(res, ServerPeerAssociationStatus) assert res.value == "PEER_ASSOCIATED" time.sleep(PeerHealthCheckTask.repeat_time * 2 + 1) - domain_peer = gateway_client.api.services.network.get_all_peers()[0] - assert domain_peer.ping_status == NodePeerConnectionStatus.ACTIVE + datasite_peer = gateway_client.api.services.network.get_all_peers()[0] + assert datasite_peer.ping_status == ServerPeerConnectionStatus.ACTIVE # Remove
existing peers - assert isinstance(_remove_existing_peers(domain_client), SyftSuccess) + assert isinstance(_remove_existing_peers(datasite_client), SyftSuccess) assert isinstance(_remove_existing_peers(gateway_client), SyftSuccess) @pytest.mark.network -def test_reverse_tunnel_connection(domain_1_port: int, gateway_port: int): - # login to the domain and gateway +def test_reverse_tunnel_connection(datasite_1_port: int, gateway_port: int): + # login to the datasite and gateway gateway_client: GatewayClient = sy.login( port=gateway_port, email="info@openmined.org", password="changethis" ) - domain_client: DomainClient = sy.login( - port=domain_1_port, email="info@openmined.org", password="changethis" + datasite_client: DatasiteClient = sy.login( + port=datasite_1_port, email="info@openmined.org", password="changethis" ) res = gateway_client.settings.allow_association_request_auto_approval(enable=False) # Try removing existing peers just to make sure - _remove_existing_peers(domain_client) + _remove_existing_peers(datasite_client) _remove_existing_peers(gateway_client) - # connecting the domain to the gateway - result = domain_client.connect_to_gateway(gateway_client, reverse_tunnel=True) + # connecting the datasite to the gateway + result = datasite_client.connect_to_gateway(gateway_client, reverse_tunnel=True) assert isinstance(result, Request) assert isinstance(result.changes[0], AssociationRequestChange) - assert len(domain_client.peers) == 1 + assert len(datasite_client.peers) == 1 - # Domain's peer is a gateway and vice-versa - domain_peer = domain_client.peers[0] - assert domain_peer.node_type == NodeType.GATEWAY - assert domain_peer.node_routes[0].rtunnel_token is None + # Datasite's peer is a gateway and vice-versa + datasite_peer = datasite_client.peers[0] + assert datasite_peer.server_type == ServerType.GATEWAY + assert datasite_peer.server_routes[0].rtunnel_token is None assert len(gateway_client.peers) == 0 gateway_client_root = gateway_client.login( @@ -960,18 +967,18 @@ def test_reverse_tunnel_connection(domain_1_port: int, gateway_port: int): gateway_peers = gateway_client.api.services.network.get_all_peers() assert len(gateway_peers) == 1 - assert len(gateway_peers[0].node_routes) == 1 - assert gateway_peers[0].node_routes[0].rtunnel_token is not None + assert len(gateway_peers[0].server_routes) == 1 + assert gateway_peers[0].server_routes[0].rtunnel_token is not None - proxy_domain_client = gateway_client.peers[0] + proxy_datasite_client = gateway_client.peers[0] - assert isinstance(proxy_domain_client, DomainClient) - assert isinstance(domain_peer, NodePeer) - assert gateway_client.name == domain_peer.name - assert domain_client.name == proxy_domain_client.name + assert isinstance(proxy_datasite_client, DatasiteClient) + assert isinstance(datasite_peer, ServerPeer) + assert gateway_client.name == datasite_peer.name + assert datasite_client.name == proxy_datasite_client.name - assert not isinstance(proxy_domain_client.datasets.get_all(), SyftError) + assert not isinstance(proxy_datasite_client.datasets.get_all(), SyftError) # Try removing existing peers just to make sure _remove_existing_peers(gateway_client) - _remove_existing_peers(domain_client) + _remove_existing_peers(datasite_client) diff --git a/tests/integration/orchestra/orchestra_test.py b/tests/integration/orchestra/orchestra_test.py index d814b89fabb..accf1b0065d 100644 --- a/tests/integration/orchestra/orchestra_test.py +++ b/tests/integration/orchestra/orchestra_test.py @@ -7,38 +7,38 @@ # syft absolute import 
syft as sy -from syft.node.node import Node +from syft.server.server import Server -@pytest.mark.parametrize("node_type", ["domain", "gateway", "enclave"]) -def test_orchestra_python_local(node_type): +@pytest.mark.parametrize("server_type", ["datasite", "gateway", "enclave"]) +def test_orchestra_python_local(server_type): name = token_hex(8) - node = sy.orchestra.launch(name=name, node_type=node_type, local_db=False) + server = sy.orchestra.launch(name=name, server_type=server_type, local_db=False) try: - assert isinstance(node.python_node, Node) - assert node.python_node.name == name - assert node.python_node.node_type == node_type - assert node.python_node.metadata.node_type == node_type + assert isinstance(server.python_server, Server) + assert server.python_server.name == name + assert server.python_server.server_type == server_type + assert server.python_server.metadata.server_type == server_type finally: - node.python_node.cleanup() - node.land() + server.python_server.cleanup() + server.land() -@pytest.mark.parametrize("node_type", ["domain", "gateway", "enclave"]) -def test_orchestra_python_server(node_type): +@pytest.mark.parametrize("server_type", ["datasite", "gateway", "enclave"]) +def test_orchestra_python_server(server_type): name = token_hex(8) - node = sy.orchestra.launch( + server = sy.orchestra.launch( name=name, port="auto", - node_type=node_type, + server_type=server_type, local_db=False, ) try: - metadata = requests.get(f"http://localhost:{node.port}/api/v2/metadata") + metadata = requests.get(f"http://localhost:{server.port}/api/v2/metadata") assert metadata.status_code == 200 assert metadata.json()["name"] == name - assert metadata.json()["node_type"] == node_type + assert metadata.json()["server_type"] == server_type finally: - node.land() + server.land() diff --git a/tox.ini b/tox.ini index 26901e321d2..9cd583eff4a 100644 --- a/tox.ini +++ b/tox.ini @@ -1,6 +1,6 @@ [tox] envlist = - dev.k8s.launch.domain + dev.k8s.launch.datasite dev.k8s.launch.gateway dev.k8s.registry dev.k8s.start @@ -178,7 +178,7 @@ commands = pnpm install; \ pnpm run test:unit; \ else \ - docker build --target grid-ui-tests -t ui-test -f frontend.dockerfile .; \ + docker build --target syft-ui-tests -t ui-test -f frontend.dockerfile .; \ docker run -t ui-test; \ fi' @@ -244,7 +244,10 @@ deps = commands = bandit -r src # restrictedpython 6.2 - safety check -i 70612 + # Temporarily ignore pytorch vulnerability warning here + # https://data.safetycli.com/v/71670/97c + # TODO: Remove `-i 71670` once torch is updated + safety check -i 70612 -i 71670 [testenv:syft.test.unit] description = Syft Unit Tests @@ -303,35 +306,35 @@ description = Launch a single backend container using the dockerfile changedir = {toxinidir}/packages setenv = N_CONSUMERS = {env:N_CONSUMERS:1} - NODE_NAME = {env:NODE_NAME:test_domain_sc} - NODE_TYPE = {env:NODE_TYPE:domain} - NODE_PORT = {env:NODE_PORT:8080} + SERVER_NAME = {env:SERVER_NAME:test_datasite_sc} + SERVER_TYPE = {env:SERVER_TYPE:datasite} + SERVER_PORT = {env:SERVER_PORT:8080} allowlist_externals = bash commands = bash -c 'tox -e single_container.destroy' - bash -c 'docker build -f grid/backend/backend.dockerfile . -t openmined/grid-backend:local-dev' + bash -c 'docker build -f grid/backend/backend.dockerfile . 
-t openmined/syft-backend:local-dev' bash -c 'docker run -d \ - -e NODE_NAME=${NODE_NAME} \ - -e NODE_TYPE=${NODE_TYPE} \ + -e SERVER_NAME=${SERVER_NAME} \ + -e SERVER_TYPE=${SERVER_TYPE} \ -e N_CONSUMERS=${N_CONSUMERS} \ -e SINGLE_CONTAINER_MODE=true \ -e CREATE_PRODUCER=true \ -e INMEMORY_WORKERS=true \ - -p ${NODE_PORT}:80 --add-host=host.docker.internal:host-gateway \ - --name ${NODE_NAME} openmined/grid-backend:local-dev' + -p ${SERVER_PORT}:80 --add-host=host.docker.internal:host-gateway \ + --name ${SERVER_NAME} openmined/syft-backend:local-dev' [testenv:single_container.destroy] description = Destroy the single backend container run using single_container.launch changedir = {toxinidir}/packages setenv = - NODE_NAME = {env:NODE_NAME:test_domain_sc} + SERVER_NAME = {env:SERVER_NAME:test_datasite_sc} allowlist_externals = bash commands = # Image is not cleaned up - bash -c 'docker stop ${NODE_NAME} || true' - bash -c 'docker rm ${NODE_NAME} || true' + bash -c 'docker stop ${SERVER_NAME} || true' + bash -c 'docker rm ${SERVER_NAME} || true' [testenv:stack.test.notebook] description = Stack Notebook Tests @@ -346,8 +349,8 @@ setenv = DEV_MODE = {env:DEV_MODE:True} TEST_NOTEBOOK_PATHS = {env:TEST_NOTEBOOK_PATHS:api/0.8} ENABLE_SIGNUP=True - NODE_URL = {env:NODE_URL:http://localhost} - NODE_PORT = {env:NODE_PORT:8080} + SERVER_URL = {env:SERVER_URL:http://localhost} + SERVER_PORT = {env:SERVER_PORT:8080} commands = # Volume cleanup @@ -410,7 +413,7 @@ passenv=HOME, USER allowlist_externals = bash setenv = - PYTEST_MODULES = {env:PYTEST_MODULES:local_node} + PYTEST_MODULES = {env:PYTEST_MODULES:local_server} ASSOCIATION_REQUEST_AUTO_APPROVAL = {env:ASSOCIATION_REQUEST_AUTO_APPROVAL:true} PYTEST_FLAGS = {env:PYTEST_FLAGS:--ignore=tests/integration/local/job_test.py} commands = @@ -448,10 +451,10 @@ allowlist_externals = echo tox setenv = - NODE_PORT = {env:NODE_PORT:9082} + SERVER_PORT = {env:SERVER_PORT:9082} GITHUB_CI = {env:GITHUB_CI:false} PYTEST_MODULES = {env:PYTEST_MODULES:frontend network container_workload} - DOMAIN_CLUSTER_NAME = {env:DOMAIN_CLUSTER_NAME:test-domain-1} + DATASITE_CLUSTER_NAME = {env:DATASITE_CLUSTER_NAME:test-datasite-1} GATEWAY_CLUSTER_NAME = {env:GATEWAY_CLUSTER_NAME:test-gateway-1} ASSOCIATION_REQUEST_AUTO_APPROVAL = {env:ASSOCIATION_REQUEST_AUTO_APPROVAL:true} SYFT_BASE_IMAGE_REGISTRY = {env:SYFT_BASE_IMAGE_REGISTRY:k3d-registry.localhost:5800} @@ -461,12 +464,12 @@ commands = k3d version # Deleting Old Cluster - bash -c "k3d cluster delete ${DOMAIN_CLUSTER_NAME} || true" + bash -c "k3d cluster delete ${DATASITE_CLUSTER_NAME} || true" bash -c "k3d cluster delete ${GATEWAY_CLUSTER_NAME} || true" # Deleting registry & volumes bash -c "k3d registry delete k3d-registry.localhost || true" - bash -c "docker volume rm k3d-${DOMAIN_CLUSTER_NAME}-images --force || true" + bash -c "docker volume rm k3d-${DATASITE_CLUSTER_NAME}-images --force || true" bash -c "docker volume rm k3d-${GATEWAY_CLUSTER_NAME}-images --force || true" # Create registry @@ -478,9 +481,9 @@ commands = tox -e dev.k8s.start && \ tox -e dev.k8s.deploy' - # Creating test-domain-1 cluster on port 9082 + # Creating test-datasite-1 cluster on port 9082 bash -c '\ - export CLUSTER_NAME=${DOMAIN_CLUSTER_NAME} CLUSTER_HTTP_PORT=9082 DEVSPACE_PROFILE=domain-tunnel && \ + export CLUSTER_NAME=${DATASITE_CLUSTER_NAME} CLUSTER_HTTP_PORT=9082 DEVSPACE_PROFILE=datasite-tunnel && \ tox -e dev.k8s.start && \ tox -e dev.k8s.deploy' @@ -497,16 +500,16 @@ commands = bash packages/grid/scripts/wait_for.sh service 
backend --context k3d-{env:GATEWAY_CLUSTER_NAME} --namespace syft bash packages/grid/scripts/wait_for.sh service proxy --context k3d-{env:GATEWAY_CLUSTER_NAME} --namespace syft - # wait for test domain 1 - bash packages/grid/scripts/wait_for.sh service mongo --context k3d-{env:DOMAIN_CLUSTER_NAME} --namespace syft - bash packages/grid/scripts/wait_for.sh service backend --context k3d-{env:DOMAIN_CLUSTER_NAME} --namespace syft - bash packages/grid/scripts/wait_for.sh service proxy --context k3d-{env:DOMAIN_CLUSTER_NAME} --namespace syft - bash packages/grid/scripts/wait_for.sh service seaweedfs --context k3d-{env:DOMAIN_CLUSTER_NAME} --namespace syft - bash packages/grid/scripts/wait_for.sh service frontend --context k3d-{env:DOMAIN_CLUSTER_NAME} --namespace syft - bash -c '(kubectl logs service/frontend --context k3d-${DOMAIN_CLUSTER_NAME} --namespace syft -f &) | grep -q -E "Network:\s+https?://[a-zA-Z0-9.-]+:[0-9]+/" || true' + # wait for test datasite 1 + bash packages/grid/scripts/wait_for.sh service mongo --context k3d-{env:DATASITE_CLUSTER_NAME} --namespace syft + bash packages/grid/scripts/wait_for.sh service backend --context k3d-{env:DATASITE_CLUSTER_NAME} --namespace syft + bash packages/grid/scripts/wait_for.sh service proxy --context k3d-{env:DATASITE_CLUSTER_NAME} --namespace syft + bash packages/grid/scripts/wait_for.sh service seaweedfs --context k3d-{env:DATASITE_CLUSTER_NAME} --namespace syft + bash packages/grid/scripts/wait_for.sh service frontend --context k3d-{env:DATASITE_CLUSTER_NAME} --namespace syft + bash -c '(kubectl logs service/frontend --context k3d-${DATASITE_CLUSTER_NAME} --namespace syft -f &) | grep -q -E "Network:\s+https?://[a-zA-Z0-9.-]+:[0-9]+/" || true' - # Checking logs generated & startup of test-domain 1 - bash -c '(kubectl logs service/backend --context k3d-${DOMAIN_CLUSTER_NAME} --namespace syft -f &) | grep -q "Application startup complete" || true' + # Checking logs generated & startup of test-datasite 1 + bash -c '(kubectl logs service/backend --context k3d-${DATASITE_CLUSTER_NAME} --namespace syft -f &) | grep -q "Application startup complete" || true' # Checking logs generated & startup of testgateway1 bash -c '(kubectl logs service/backend --context k3d-${GATEWAY_CLUSTER_NAME} --namespace syft -f &) | grep -q "Application startup complete" || true' @@ -525,10 +528,10 @@ commands = done' # deleting clusters created - bash -c "CLUSTER_NAME=${DOMAIN_CLUSTER_NAME} tox -e dev.k8s.destroy || true" + bash -c "CLUSTER_NAME=${DATASITE_CLUSTER_NAME} tox -e dev.k8s.destroy || true" bash -c "CLUSTER_NAME=${GATEWAY_CLUSTER_NAME} tox -e dev.k8s.destroy || true" bash -c "k3d registry delete k3d-registry.localhost || true" - bash -c "docker volume rm k3d-${DOMAIN_CLUSTER_NAME}-images --force || true" + bash -c "docker volume rm k3d-${DATASITE_CLUSTER_NAME}-images --force || true" bash -c "docker volume rm k3d-${GATEWAY_CLUSTER_NAME}-images --force || true" [testenv:stack.test.notebook.k8s] @@ -552,28 +555,28 @@ setenv = ORCHESTRA_DEPLOYMENT_TYPE = {env:ORCHESTRA_DEPLOYMENT_TYPE:remote} GITHUB_CI = {env:GITHUB_CI:false} SYFT_BASE_IMAGE_REGISTRY = {env:SYFT_BASE_IMAGE_REGISTRY:k3d-registry.localhost:5800} - DOMAIN_CLUSTER_NAME = {env:DOMAIN_CLUSTER_NAME:test-domain-1} - NODE_URL = {env:NODE_URL:http://localhost} - NODE_PORT = {env:NODE_PORT:8080} + DATASITE_CLUSTER_NAME = {env:DATASITE_CLUSTER_NAME:test-datasite-1} + SERVER_URL = {env:SERVER_URL:http://localhost} + SERVER_PORT = {env:SERVER_PORT:8080} commands = bash -c "echo Running with 
GITHUB_CI=$GITHUB_CI; date" python -c 'import syft as sy; sy.stage_protocol_changes()' k3d version # Deleting Old Cluster - bash -c "k3d cluster delete ${DOMAIN_CLUSTER_NAME} || true" + bash -c "k3d cluster delete ${DATASITE_CLUSTER_NAME} || true" # Deleting registry & volumes bash -c "k3d registry delete k3d-registry.localhost || true" - bash -c "docker volume rm k3d-${DOMAIN_CLUSTER_NAME}-images --force || true" + bash -c "docker volume rm k3d-${DATASITE_CLUSTER_NAME}-images --force || true" # Create registry tox -e dev.k8s.registry - # Creating test-domain-1 cluster on port NODE_PORT + # Creating test-datasite-1 cluster on port SERVER_PORT bash -c '\ - export CLUSTER_NAME=${DOMAIN_CLUSTER_NAME} CLUSTER_HTTP_PORT=${NODE_PORT} && \ + export CLUSTER_NAME=${DATASITE_CLUSTER_NAME} CLUSTER_HTTP_PORT=${SERVER_PORT} && \ tox -e dev.k8s.start && \ tox -e dev.k8s.deploy' @@ -585,23 +588,23 @@ commands = sleep 30 - # wait for test-domain-1 - bash packages/grid/scripts/wait_for.sh service mongo --context k3d-{env:DOMAIN_CLUSTER_NAME} --namespace syft - bash packages/grid/scripts/wait_for.sh service backend --context k3d-{env:DOMAIN_CLUSTER_NAME} --namespace syft - bash packages/grid/scripts/wait_for.sh service proxy --context k3d-{env:DOMAIN_CLUSTER_NAME} --namespace syft - bash packages/grid/scripts/wait_for.sh service seaweedfs --context k3d-{env:DOMAIN_CLUSTER_NAME} --namespace syft - bash packages/grid/scripts/wait_for.sh service frontend --context k3d-{env:DOMAIN_CLUSTER_NAME} --namespace syft - bash -c '(kubectl logs service/frontend --context k3d-${DOMAIN_CLUSTER_NAME} --namespace syft -f &) | grep -q -E "Network:\s+https?://[a-zA-Z0-9.-]+:[0-9]+/" || true' + # wait for test-datasite-1 + bash packages/grid/scripts/wait_for.sh service mongo --context k3d-{env:DATASITE_CLUSTER_NAME} --namespace syft + bash packages/grid/scripts/wait_for.sh service backend --context k3d-{env:DATASITE_CLUSTER_NAME} --namespace syft + bash packages/grid/scripts/wait_for.sh service proxy --context k3d-{env:DATASITE_CLUSTER_NAME} --namespace syft + bash packages/grid/scripts/wait_for.sh service seaweedfs --context k3d-{env:DATASITE_CLUSTER_NAME} --namespace syft + bash packages/grid/scripts/wait_for.sh service frontend --context k3d-{env:DATASITE_CLUSTER_NAME} --namespace syft + bash -c '(kubectl logs service/frontend --context k3d-${DATASITE_CLUSTER_NAME} --namespace syft -f &) | grep -q -E "Network:\s+https?://[a-zA-Z0-9.-]+:[0-9]+/" || true' - # Checking logs generated & startup of test-domain 1 - bash -c '(kubectl logs service/backend --context k3d-${DOMAIN_CLUSTER_NAME} --namespace syft -f &) | grep -q "Application startup complete" || true' + # Checking logs generated & startup of test-datasite 1 + bash -c '(kubectl logs service/backend --context k3d-${DATASITE_CLUSTER_NAME} --namespace syft -f &) | grep -q "Application startup complete" || true' bash -c "pytest -x --nbmake notebooks/api/0.8 -p no:randomly -k 'not 10-container-images.ipynb' -vvvv --nbmake-timeout=1000" # deleting clusters created - bash -c "CLUSTER_NAME=${DOMAIN_CLUSTER_NAME} tox -e dev.k8s.destroy || true" + bash -c "CLUSTER_NAME=${DATASITE_CLUSTER_NAME} tox -e dev.k8s.destroy || true" bash -c "k3d registry delete k3d-registry.localhost || true" - bash -c "docker volume rm k3d-${DOMAIN_CLUSTER_NAME}-images --force || true" + bash -c "docker volume rm k3d-${DATASITE_CLUSTER_NAME}-images --force || true" [testenv:syft.build.helm] @@ -659,7 +662,7 @@ allowlist_externals = curl setenv = CLUSTER_NAME = {env:CLUSTER_NAME:syft} - CLUSTER_HTTP_PORT 
= {env:NODE_PORT:8080} + CLUSTER_HTTP_PORT = {env:SERVER_PORT:8080} ; Usage for posargs: names of the relevant services among {frontend backend proxy mongo seaweedfs registry} commands = bash -c "env; date; k3d version" @@ -703,14 +706,14 @@ allowlist_externals = tox setenv = ORCHESTRA_DEPLOYMENT_TYPE = {env:ORCHESTRA_DEPLOYMENT_TYPE:remote} - NODE_PORT = {env:NODE_PORT:8080} - NODE_URL = {env:NODE_URL:http://localhost} + SERVER_PORT = {env:SERVER_PORT:8080} + SERVER_URL = {env:SERVER_URL:http://localhost} EXCLUDE_NOTEBOOKS = {env:EXCLUDE_NOTEBOOKS:not 10-container-images.ipynb} SYFT_VERSION = {env:SYFT_VERSION:local} EXTERNAL_REGISTRY = {env:EXTERNAL_REGISTRY:k3d-registry.localhost:5800} ; env vars for dev.k8s.start - CLUSTER_NAME = testdomain - CLUSTER_HTTP_PORT = {env:NODE_PORT:8080} + CLUSTER_NAME = testdatasite + CLUSTER_HTTP_PORT = {env:SERVER_PORT:8080} ; Usage for posargs: if you pass override to this tox command, then resourcesPreset will be overridden commands = bash -c "env; date; k3d version" @@ -723,9 +726,9 @@ commands = echo "Installing local helm charts"; \ if [[ "{posargs}" == "override" ]]; then \ echo "Overriding resourcesPreset"; \ - helm install ${CLUSTER_NAME} ./helm/syft -f ./helm/values.dev.yaml --kube-context k3d-${CLUSTER_NAME} --namespace syft --create-namespace --set node.resourcesPreset=null --set seaweedfs.resourcesPreset=null --set mongo.resourcesPreset=null --set registry.resourcesPreset=null --set proxy.resourcesPreset=null --set frontend.resourcesPreset=null; \ + helm install ${CLUSTER_NAME} ./helm/syft -f ./helm/examples/dev/base.yaml --kube-context k3d-${CLUSTER_NAME} --namespace syft --create-namespace --set server.resourcesPreset=null --set seaweedfs.resourcesPreset=null --set mongo.resourcesPreset=null --set registry.resourcesPreset=null --set proxy.resourcesPreset=null --set frontend.resourcesPreset=null; \ else \ - helm install ${CLUSTER_NAME} ./helm/syft -f ./helm/values.dev.yaml --kube-context k3d-${CLUSTER_NAME} --namespace syft --create-namespace; \ + helm install ${CLUSTER_NAME} ./helm/syft -f ./helm/examples/dev/base.yaml --kube-context k3d-${CLUSTER_NAME} --namespace syft --create-namespace; \ fi \ else \ echo "Installing helm charts from repo for syft version: ${SYFT_VERSION}"; \ @@ -733,9 +736,9 @@ commands = helm repo update openmined; \ if [[ "{posargs}" == "override" ]]; then \ echo "Overriding resourcesPreset"; \ - helm install ${CLUSTER_NAME} openmined/syft --version=${SYFT_VERSION} -f ./helm/values.dev.yaml --kube-context k3d-${CLUSTER_NAME} --namespace syft --create-namespace --set node.resourcesPreset=null --set seaweedfs.resourcesPreset=null --set mongo.resourcesPreset=null --set registry.resourcesPreset=null --set proxy.resourcesPreset=null --set frontend.resourcesPreset=null; \ + helm install ${CLUSTER_NAME} openmined/syft --version=${SYFT_VERSION} -f ./helm/examples/dev/base.yaml --kube-context k3d-${CLUSTER_NAME} --namespace syft --create-namespace --set server.resourcesPreset=null --set seaweedfs.resourcesPreset=null --set mongo.resourcesPreset=null --set registry.resourcesPreset=null --set proxy.resourcesPreset=null --set frontend.resourcesPreset=null; \ else \ - helm install ${CLUSTER_NAME} openmined/syft --version=${SYFT_VERSION} -f ./helm/values.dev.yaml --kube-context k3d-${CLUSTER_NAME} --namespace syft --create-namespace; \ + helm install ${CLUSTER_NAME} openmined/syft --version=${SYFT_VERSION} -f ./helm/examples/dev/base.yaml --kube-context k3d-${CLUSTER_NAME} --namespace syft --create-namespace; \ fi \ fi' @@ -914,11 
+917,11 @@ commands = tox -e dev.k8s.start tox -e dev.k8s.{posargs:deploy} -[testenv:dev.k8s.launch.domain] -description = Launch a single domain on K8s +[testenv:dev.k8s.launch.datasite] +description = Launch a single datasite on K8s passenv = HOME, USER setenv= - CLUSTER_NAME = {env:CLUSTER_NAME:test-domain-1} + CLUSTER_NAME = {env:CLUSTER_NAME:test-datasite-1} CLUSTER_HTTP_PORT={env:CLUSTER_HTTP_PORT:9082} allowlist_externals = tox @@ -932,11 +935,21 @@ passenv = HOME, USER setenv= CLUSTER_NAME = {env:CLUSTER_NAME:test-enclave-1} CLUSTER_HTTP_PORT={env:CLUSTER_HTTP_PORT:9083} - DEVSPACE_PROFILE=enclave + K3S_DOCKER_IMAGE={env:K3S_DOCKER_IMAGE:docker.io/rasswanth/k3s:v1.28.8-k3s1-cuda-12.2.0-base-ubuntu22.04} + CLUSTER_ARGS={env:CLUSTER_ARGS:--volume /sys/kernel/security:/sys/kernel/security --volume /dev/tmprm0:/dev/tmprm0} + DEVSPACE_PROFILE={env:DEVSPACE_PROFILE:enclave-cpu} allowlist_externals = tox + bash commands = - tox -e dev.k8s.start -- --volume /sys/kernel/security:/sys/kernel/security --volume /dev/tmprm0:/dev/tmprm0 + bash -c "echo k3S_DOCKER_IMAGE=$K3S_DOCKER_IMAGE GPU_ENABLED=$GPU_ENABLED" + bash -c "if [ $DEVSPACE_PROFILE == 'enclave-gpu' ]; then \ + echo 'Starting cluster with GPU support'; \ + tox -e dev.k8s.start -- $CLUSTER_ARGS --image=$K3S_DOCKER_IMAGE --gpus=all; \ + else \ + echo 'Starting cluster with CPU support'; \ + tox -e dev.k8s.start -- $CLUSTER_ARGS; \ + fi" tox -e dev.k8s.{posargs:deploy} [testenv:dev.k8s.destroy] @@ -1033,12 +1046,12 @@ allowlist_externals = passenv = EXTERNAL_REGISTRY,EXTERNAL_REGISTRY_USERNAME,EXTERNAL_REGISTRY_PASSWORD setenv = ORCHESTRA_DEPLOYMENT_TYPE = {env:ORCHESTRA_DEPLOYMENT_TYPE:remote} - NODE_PORT = {env:NODE_PORT:8080} - NODE_URL = {env:NODE_URL:http://localhost} + SERVER_PORT = {env:SERVER_PORT:8080} + SERVER_URL = {env:SERVER_URL:http://localhost} EXCLUDE_NOTEBOOKS = {env:EXCLUDE_NOTEBOOKS:} SYFT_VERSION = {env:SYFT_VERSION:local} commands = - bash -c "echo Running with ORCHESTRA_DEPLOYMENT_TYPE=$ORCHESTRA_DEPLOYMENT_TYPE NODE_PORT=$NODE_PORT NODE_URL=$NODE_URL \ + bash -c "echo Running with ORCHESTRA_DEPLOYMENT_TYPE=$ORCHESTRA_DEPLOYMENT_TYPE SERVER_PORT=$SERVER_PORT SERVER_URL=$SERVER_URL \ Excluding notebooks: $EXCLUDE_NOTEBOOKS SYFT_VERSION=$SYFT_VERSION \ EXTERNAL_REGISTRY=$EXTERNAL_REGISTRY; date" @@ -1113,8 +1126,8 @@ description = Migration Test on K8s setenv = GITHUB_CI = {env:GITHUB_CI:false} SYFT_BASE_IMAGE_REGISTRY = {env:SYFT_BASE_IMAGE_REGISTRY:k3d-registry.localhost:5800} - DOMAIN_CLUSTER_NAME = {env:DOMAIN_CLUSTER_NAME:test-domain-1} - NODE_PORT = {env:NODE_PORT:8080} + DATASITE_CLUSTER_NAME = {env:DATASITE_CLUSTER_NAME:test-datasite-1} + SERVER_PORT = {env:SERVER_PORT:8080} deps = {[testenv:syft]deps} nbmake @@ -1131,8 +1144,8 @@ allowlist_externals = k3d echo commands = - # - create migration data on local 0.8.6 node - # - dump the data to a file on a local dev node + # - create migration data on local 0.8.6 server + # - dump the data to a file on a local dev server # - migrate the data to a new cluster # NOTE This is only needed because our previous deployed version # does not have the migration feature, once it does we can create @@ -1154,21 +1167,21 @@ commands = k3d version # Deleting Old Cluster - bash -c "k3d cluster delete ${DOMAIN_CLUSTER_NAME} || true" + bash -c "k3d cluster delete ${DATASITE_CLUSTER_NAME} || true" # Deleting registry & volumes bash -c "k3d registry delete k3d-registry.localhost || true" - bash -c "docker volume rm k3d-${DOMAIN_CLUSTER_NAME}-images --force || true" + bash -c 
"docker volume rm k3d-${DATASITE_CLUSTER_NAME}-images --force || true" # Create registry tox -e dev.k8s.registry - # Creating test-domain-1 cluster on port NODE_PORT - # NOTE set DEVSPACE_PROFILE=migrated-domain will start the cluster with variables from migration.yaml + # Creating test-datasite-1 cluster on port SERVER_PORT + # NOTE set DEVSPACE_PROFILE=migrated-datasite will start the cluster with variables from migration.yaml bash -c '\ - export CLUSTER_NAME=${DOMAIN_CLUSTER_NAME} \ - CLUSTER_HTTP_PORT=${NODE_PORT} \ - DEVSPACE_PROFILE=migrated-domain && \ + export CLUSTER_NAME=${DATASITE_CLUSTER_NAME} \ + CLUSTER_HTTP_PORT=${SERVER_PORT} \ + DEVSPACE_PROFILE=migrated-datasite && \ tox -e dev.k8s.start && \ tox -e dev.k8s.deploy' @@ -1180,23 +1193,23 @@ commands = sleep 30 - ; # wait for test-domain-1 - ; bash packages/grid/scripts/wait_for.sh service mongo --context k3d-{env:DOMAIN_CLUSTER_NAME} --namespace syft - ; bash packages/grid/scripts/wait_for.sh service backend --context k3d-{env:DOMAIN_CLUSTER_NAME} --namespace syft - ; bash packages/grid/scripts/wait_for.sh service proxy --context k3d-{env:DOMAIN_CLUSTER_NAME} --namespace syft - ; bash packages/grid/scripts/wait_for.sh service seaweedfs --context k3d-{env:DOMAIN_CLUSTER_NAME} --namespace syft - ; bash packages/grid/scripts/wait_for.sh service frontend --context k3d-{env:DOMAIN_CLUSTER_NAME} --namespace syft - ; bash -c '(kubectl logs service/frontend --context k3d-${DOMAIN_CLUSTER_NAME} --namespace syft -f &) | grep -q -E "Network:\s+https?://[a-zA-Z0-9.-]+:[0-9]+/" || true' + ; # wait for test-datasite-1 + ; bash packages/grid/scripts/wait_for.sh service mongo --context k3d-{env:DATASITE_CLUSTER_NAME} --namespace syft + ; bash packages/grid/scripts/wait_for.sh service backend --context k3d-{env:DATASITE_CLUSTER_NAME} --namespace syft + ; bash packages/grid/scripts/wait_for.sh service proxy --context k3d-{env:DATASITE_CLUSTER_NAME} --namespace syft + ; bash packages/grid/scripts/wait_for.sh service seaweedfs --context k3d-{env:DATASITE_CLUSTER_NAME} --namespace syft + ; bash packages/grid/scripts/wait_for.sh service frontend --context k3d-{env:DATASITE_CLUSTER_NAME} --namespace syft + ; bash -c '(kubectl logs service/frontend --context k3d-${DATASITE_CLUSTER_NAME} --namespace syft -f &) | grep -q -E "Network:\s+https?://[a-zA-Z0-9.-]+:[0-9]+/" || true' - ; # Checking logs generated & startup of test-domain 1 - ; bash -c '(kubectl logs service/backend --context k3d-${DOMAIN_CLUSTER_NAME} --namespace syft -f &) | grep -q "Application startup complete" || true' + ; # Checking logs generated & startup of test-datasite 1 + ; bash -c '(kubectl logs service/backend --context k3d-${DATASITE_CLUSTER_NAME} --namespace syft -f &) | grep -q "Application startup complete" || true' # Run Notebook tests pytest -x --nbmake --nbmake-timeout=1000 notebooks/tutorials/version-upgrades/2-migrate-from-file.ipynb -vvvv # deleting clusters created - bash -c "CLUSTER_NAME=${DOMAIN_CLUSTER_NAME} tox -e dev.k8s.destroy || true" + bash -c "CLUSTER_NAME=${DATASITE_CLUSTER_NAME} tox -e dev.k8s.destroy || true" commands_post = bash -c 'rm -f notebooks/tutorials/version-upgrades/migration.blob' - bash -c 'rm -f notebooks/tutorials/version-upgrades/migration.yaml' \ No newline at end of file + bash -c 'rm -f notebooks/tutorials/version-upgrades/migration.yaml'