diff --git a/.github/workflows/continuous-integration.yaml b/.github/workflows/continuous-integration.yaml
index b5cd62113..36b2fda72 100644
--- a/.github/workflows/continuous-integration.yaml
+++ b/.github/workflows/continuous-integration.yaml
@@ -6,7 +6,7 @@ on:
    branches:
      - main
      - 'release/*'
- 
+
  release:
    types: [published]
@@ -37,14 +37,14 @@ on:
        type: boolean
        required: false
        default: true
- 
-  # Workflow call refers to the weekly or release process (it enables the current CI workflow to be 
+
+  # Workflow call refers to the weekly or release process (it enables the current CI workflow to be
  # called by another workflow from the same repository, in this case the release one)
-  # No default value is put in order to avoid running the following CI without explicitly 
+  # No default value is set, in order to avoid running the following CI without explicitly
  # indicating it in the caller workflow
  # Besides, GitHub actions are not able to differentiate 'workflow_dispatch' from 'workflow_call'
-  # based on 'github.event_name' and both are set to 'workflow_dispatch'. Therefore, an optional 
-  # input 'manual_call' with proper default values is added to both as a workaround, following one 
+  # based on 'github.event_name': both are set to 'workflow_dispatch'. Therefore, an optional
+  # input 'manual_call' with proper default values is added to both as a workaround, following a
  # user's suggestion: https://github.com/actions/runner/discussions/1884
  # FIXME: https://github.com/zama-ai/concrete-ml-internal/issues/3930
  workflow_call:
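Condensed, the workaround amounts to declaring the same optional input on both triggers, with defaults chosen so that only a genuine manual dispatch reads as one. A sketch — the `workflow_call` side is truncated in the hunk above, so its default here is an assumption:

```yaml
on:
  workflow_dispatch:
    inputs:
      manual_call:
        type: boolean
        required: false
        default: true   # a genuine manual dispatch keeps this true

  workflow_call:
    inputs:
      manual_call:
        type: boolean
        required: false
        default: false  # assumption: a nested call leaves this false

# 'github.event_name' reads "workflow_dispatch" in both cases, so the
# IS_WORKFLOW_DISPATCH flag below additionally requires 'inputs.manual_call'
```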
@@ -68,23 +68,23 @@ concurrency:

env:
  ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
-  # The CI can be triggered by the release workflow which itself can be triggered by the merge of a 
-  # pull-request (following the 'prepare_release' workflow). Since GitHub weirdly propagates the 
-  # original 'github.event_name' (here "pull_request") in all nested workflows, we need to 
+  # The CI can be triggered by the release workflow, which itself can be triggered by the merge of a
+  # pull-request (following the 'prepare_release' workflow). Since GitHub weirdly propagates the
+  # original 'github.event_name' (here "pull_request") to all nested workflows, we need to
  # differentiate the release CI from regular CIs by using 'inputs.event_name', which should be set
  # to "release" by the release workflow
  IS_PR: ${{ github.event_name == 'pull_request' && inputs.event_name != 'release' }}
-  # Run the weekly CI if it has been triggered manually by the weekly workflow, meaning 
+  # Run the weekly CI if it has been triggered by the weekly workflow, meaning
  # 'inputs.event_name' is set to "weekly"
  IS_WEEKLY: ${{ inputs.event_name == 'weekly'}}
-  # The 'IS_RELEASE' variable indicates that the workflow has been triggered by the releasing 
+  # The 'IS_RELEASE' variable indicates that the workflow has been triggered by the release
  # process itself, before publishing. It should only happen when the release workflow triggers
  # the CI, in which case 'inputs.event_name' is set to "release"
  IS_RELEASE: ${{ inputs.event_name == 'release' }}
  IS_PUSH_TO_MAIN: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }}
  IS_PUSH_TO_RELEASE: ${{ github.event_name == 'push' && startsWith(github.ref, 'refs/heads/release/') }}
  IS_WORKFLOW_DISPATCH: ${{ github.event_name == 'workflow_dispatch' && inputs.manual_call}}
-  # The 'IS_PUBLISHED_RELEASE' variable indicates that the workflow has been triggered by a 
+  # The 'IS_PUBLISHED_RELEASE' variable indicates that the workflow has been triggered by a
  # release's successful publishing
  IS_PUBLISHED_RELEASE: ${{ github.event_name == 'release'}}
  AGENT_TOOLSDIRECTORY: /opt/hostedtoolcache
@@ -172,7 +172,7 @@ jobs:
          echo "Unknown BUILD_TYPE! Aborting"
          exit 1
        fi
- 
+
        echo "LINUX_PYTHON_VERSIONS: ${LINUX_PYTHON_VERSIONS}"
        echo "MACOS_PYTHON_VERSIONS: ${MACOS_PYTHON_VERSIONS}"
@@ -356,6 +356,8 @@ jobs:
      PIP_EXTRA_INDEX_URL: ${{ secrets.PIP_EXTRA_INDEX_URL }}
      AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
      AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+    outputs:
+      hashes: ${{ steps.hash.outputs.hashes }}

    steps:
      - name: Add masks
@@ -386,8 +388,8 @@ jobs:
        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
        with:
          fetch-depth: 0
- 
-      # Pull necessary LFS files (and thus avoid downloading files stored for benchmarks, use cases, ...) 
+
+      # Pull necessary LFS files (and thus avoid downloading files stored for benchmarks, use cases, ...)
      - name: Pull LFS files
        run: |
          git lfs pull --include "tests/data/**, src/concrete/ml/pandas/_client_server_files/**" --exclude ""
@@ -473,17 +475,17 @@ jobs:
        run: |
          make pcc

-      # Checked for changes between main and the current branch in a PR. More specifically, 
-      # this is used in regular CIs to avoid launching Pytest, checking codeblocks, building docs 
+      # Check for changes between main and the current branch in a PR. More specifically,
+      # this is used in regular CIs to avoid launching Pytest, checking codeblocks, building docs
      # or other steps if the associated files were not touched. For most, we also check that the
      # linux MD5 has not changed, which means that no libraries got updated. This is done in order
      # to handle PRs that only upgrade dependencies
-      # Following the 'files_yaml' section, we define what files should trigger a defined acronym 
-      # (src, codeblocks, ...) when some changes are detected in them. For example, if some 
+      # In the 'files_yaml' section, we define which files should trigger a given acronym
+      # (src, codeblocks, ...) when changes are detected in them. For example, if some
      # dependencies were changed, the 'tests', 'determinism' and 'codeblocks' acronyms
-      # will be affected. We use the license MD5 file for that because it is built on the 
+      # will be affected. We use the license MD5 file for that because it is built from the
      # poetry.lock as well as the Concrete Python version, which can be installed manually in the
-      # makefile. 
+      # makefile.
      # For codeblocks, 'make pytest_codeblocks' runs the `make_utils/pytest_codeblocks.sh` script,
      # which executes a find and grep command to find them. In the following section, we manually
      # re-define what this command does by looking at all markdown files that are neither in hidden
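The acronyms above (src, tests, dependencies, ...) correspond to keys of the `files_yaml` input of tj-actions/changed-files, each of which yields a `<key>_any_changed` output. The globs below are illustrative, not the file's actual configuration:

```yaml
        with:
          files_yaml: |
            src:
              - src/**
            tests:
              - tests/**
            dependencies:
              - deps_licenses/licenses_linux_user.txt.md5
            conftest:
              - conftest.py
            makefile:
              - Makefile
```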
@@ -493,10 +495,10 @@ jobs:
      # as for release and weekly checks, as there are no changes to check in these cases
      - name: Get all changed files from main in PR
        id: changed-files-in-pr
-        if: | 
+        if: |
          fromJSON(env.IS_PR)
          && steps.install-deps.outcome == 'success'
-          && steps.make-pcc.outcome == 'success' 
+          && steps.make-pcc.outcome == 'success'
          && !cancelled()
        uses: tj-actions/changed-files@d6babd6899969df1a11d14c368283ea4436bca78 # v44.5.2
        with:
@@ -531,7 +533,7 @@ jobs:
            - Makefile

      # Run determinism test if:
-      # - during weekly or release CI, as well as when the CI has been triggered manually (through 
+      # - during weekly or release CI, as well as when the CI has been triggered manually (through
      #   GitHub's Action interface)
      # - the determinism test file has been changed
      # - the source code has been changed
@@ -542,22 +544,22 @@ jobs:
        id: determinism
        if: |
          (
-            steps.changed-files-in-pr.outcome == 'skipped' 
+            steps.changed-files-in-pr.outcome == 'skipped'
            || steps.changed-files-in-pr.outputs.determinism_any_changed == 'true'
            || steps.changed-files-in-pr.outputs.src_any_changed == 'true'
            || steps.changed-files-in-pr.outputs.dependencies_any_changed == 'true'
            || steps.changed-files-in-pr.outputs.conftest_any_changed == 'true'
            || steps.changed-files-in-pr.outputs.makefile_any_changed == 'true'
          )
-          && steps.install-deps.outcome == 'success' 
-          && steps.make-pcc.outcome == 'success' 
+          && steps.install-deps.outcome == 'success'
+          && steps.make-pcc.outcome == 'success'
          && !cancelled()
        run: |
          make determinism

      # Build the documentation if:
      # - the current workflow takes place in a release CI with the reference build
-      # - the current workflow takes place in a weekly CI or it has been triggered manually (through 
+      # - the current workflow takes place in a weekly CI or has been triggered manually (through
      #   GitHub's Action interface)
      # - any documentation file has been changed
      # - the source code has been changed
@@ -567,19 +569,19 @@ jobs:
        if: |
          (
            (fromJSON(env.IS_RELEASE) && fromJSON(env.IS_REF_BUILD))
-            || steps.changed-files-in-pr.outcome == 'skipped' 
-            || steps.changed-files-in-pr.outputs.docs_any_changed == 'true' 
-            || steps.changed-files-in-pr.outputs.use_cases_any_changed == 'true' 
+            || steps.changed-files-in-pr.outcome == 'skipped'
+            || steps.changed-files-in-pr.outputs.docs_any_changed == 'true'
+            || steps.changed-files-in-pr.outputs.use_cases_any_changed == 'true'
            || steps.changed-files-in-pr.outputs.src_any_changed == 'true'
            || steps.changed-files-in-pr.outputs.makefile_any_changed == 'true'
          )
-          && steps.install-deps.outcome == 'success' 
-          && steps.make-pcc.outcome == 'success' 
-          && steps.determinism.outcome != 'failure' 
+          && steps.install-deps.outcome == 'success'
+          && steps.make-pcc.outcome == 'success'
+          && steps.determinism.outcome != 'failure'
          && !cancelled()
        run: |
          make docs_no_links
- 
+
      # Do not check links during the release process in order to avoid temporary connection errors
      - name: Check links
        id: check_links
@@ -591,7 +593,7 @@ jobs:
          make check_links
          make check_symlinks

-      # Make sure all necessary steps passed. For build-docs and determinism steps, we only check for 
+      # Make sure all necessary steps passed. For the build-docs and determinism steps, we only check for
      # non-failures, as the 'changed-files-in-pr' step might skip them
      - name: Stop if previous steps failed
        id: conformance
@@ -599,8 +601,8 @@ jobs:
        env:
          CONFORMANCE_STATUS: >-
            ${{
-              steps.commit-conformance.outcome == 'success' 
-              && steps.make-pcc.outcome == 'success' 
+              steps.commit-conformance.outcome == 'success'
+              && steps.make-pcc.outcome == 'success'
              && steps.determinism.outcome != 'failure'
              && steps.build-docs.outcome != 'failure'
              && steps.check_links.outcome != 'failure'
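The body of the conformance step lies outside these hunks; presumably it fails the job whenever the aggregated `CONFORMANCE_STATUS` is not true, along these lines (a sketch, not the file's actual step body):

```yaml
        run: |
          if [[ "${CONFORMANCE_STATUS}" != "true" ]]; then
            echo "At least one required step did not succeed"
            exit 1
          fi
```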
@@ -621,10 +623,10 @@ jobs:
      - name: Tar docs artifacts
        id: tar-docs
        if: |
-          fromJSON(env.IS_RELEASE) 
-          && fromJSON(env.IS_REF_BUILD) 
-          && steps.conformance.outcome == 'success' 
-          && steps.build-docs.outcome == 'success' 
+          fromJSON(env.IS_RELEASE)
+          && fromJSON(env.IS_REF_BUILD)
+          && steps.conformance.outcome == 'success'
+          && steps.build-docs.outcome == 'success'
          && !cancelled()
        run: |
          cd docs/_build/html
@@ -638,12 +640,12 @@ jobs:
          path: docs/_build/html/docs.tar

      # Generate the changelog for releases with the reference build only
-      # The changelog is generated by considering all commits from the latest stable previous 
+      # The changelog is generated by considering all commits from the latest previous stable
      # version (not a release candidate) up to the new upcoming version
      - name: Generate release changelog
        id: changelog
        if: |
-          fromJSON(env.IS_RELEASE) 
+          fromJSON(env.IS_RELEASE)
          && fromJSON(env.IS_REF_BUILD)
          && steps.conformance.outcome == 'success'
          && !cancelled()
@@ -654,7 +656,7 @@ jobs:
          echo "changelog-file=${CHANGELOG_FILE}" >> $GITHUB_OUTPUT
          poetry run python ./script/make_utils/changelog_helper.py \
            --to-ref "${{ github.sha }}" > "${CHANGELOG_FILE}"
- 
+
      - name: Upload changelog artifacts
        if: ${{ steps.changelog.outcome == 'success' && !cancelled() }}
        uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3
        with:
@@ -668,45 +670,51 @@ jobs:
      # and macOS as long as the dependencies are available
      - name: Build wheel
        id: build-wheel
-        if: | 
+        if: |
          fromJSON(env.IS_RELEASE)
-          && fromJSON(env.IS_REF_BUILD) 
-          && steps.conformance.outcome == 'success' 
+          && fromJSON(env.IS_REF_BUILD)
+          && steps.conformance.outcome == 'success'
          && !cancelled()
        run: |
          rm -rf dist
          poetry build -f wheel

+      - name: "Generate hashes"
+        id: hash
+        if: ${{ steps.build-wheel.outcome == 'success' && !cancelled() }}
+        run: |
+          cd dist && echo "hashes=$(sha256sum * | base64 -w0)" >> $GITHUB_OUTPUT
+
      - name: Upload wheel artifacts
        if: ${{ steps.build-wheel.outcome == 'success' && !cancelled() }}
        uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3
        with:
          name: py3-wheel
          path: dist/*.whl
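The new 'Generate hashes' step base64-encodes the `sha256sum` output (`-w0` disables line wrapping) so that it survives the trip through a job output. A downstream consumer can reverse the encoding and re-check the wheel; a sketch, with the step name and working directory being illustrative:

```yaml
      - name: Check wheel hashes (illustrative)
        run: |
          cd dist
          echo "${{ needs.build-linux.outputs.hashes }}" | base64 -d | sha256sum --check
```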
- 
+
      # Run pytest if:
      # - the current workflow does not take place in a release CI
      # - the CI has been triggered manually (through GitHub's Action interface)
      # - the source code has been changed
      # - any test utils (pytest, data) have been changed
-      # - any dependency has been updated 
+      # - any dependency has been updated
      # - conftest.py has been changed
      # - Makefile has been changed
      # If the workflow takes place in a release CI, an option is added to take more tests into account
      # If only some test files were changed, this step is skipped and each associated test will be
      # run individually in a following step (pytest_modified_tests_only)
-      # If regular tests failed, a following script checks for flaky tests. If all failed tests 
+      # If regular tests failed, a following script checks for flaky tests. If all failed tests
      # are known flaky tests, they are rerun. Otherwise, the step exits with status 1.
-      # The 'bash +e {0}' is added here in order to make sure that the step does not exit directly 
+      # The 'bash +e {0}' is added here in order to make sure that the step does not exit directly
      # if 'make pytest' fails
      - name: PyTest Source Code (regular, weekly)
        id: pytest
        if: |
          (
            (
-              steps.changed-files-in-pr.outcome == 'success' 
+              steps.changed-files-in-pr.outcome == 'success'
              && (
-                steps.changed-files-in-pr.outputs.src_any_changed == 'true' 
+                steps.changed-files-in-pr.outputs.src_any_changed == 'true'
                || steps.changed-files-in-pr.outputs.tests_utils_any_changed == 'true'
                || steps.changed-files-in-pr.outputs.dependencies_any_changed == 'true'
                || steps.changed-files-in-pr.outputs.conftest_any_changed == 'true'
@@ -716,7 +724,7 @@ jobs:
            || fromJSON(env.IS_WORKFLOW_DISPATCH)
            || fromJSON(env.IS_WEEKLY)
          )
-          && steps.conformance.outcome == 'success' 
+          && steps.conformance.outcome == 'success'
          && !cancelled()
        shell: bash +e {0}
@@ -726,7 +734,7 @@ jobs:
          else
            PYTEST_OPTIONS=""
          fi
- 
+
          make pytest_and_report PYTEST_OPTIONS=${PYTEST_OPTIONS}

          # If regular tests failed, check for flaky tests
@@ -747,7 +755,7 @@ jobs:
          # If all failed tests are known flaky tests, try to rerun them
          if [[ "${FAILED_TESTS_ARE_FLAKY}" == "true" ]]; then
            make pytest_run_last_failed
- 
+
          # Else, return exit status 1 in order to make this step fail
          else
            exit 1
          fi
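The `FAILED_TESTS_ARE_FLAKY` flag read back as `env.FAILED_TESTS_ARE_FLAKY` by the next steps is exported from inside this run block; the export itself falls outside the hunks shown, but the standard pattern is:

```yaml
          # Presumed export (the exact line is not part of this diff)
          echo "FAILED_TESTS_ARE_FLAKY=${FAILED_TESTS_ARE_FLAKY}" >> "$GITHUB_ENV"
```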
@@ -758,9 +766,9 @@ jobs:
      - name: Upload flaky tests list (weekly)
        uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
        if: |
-          fromJSON(env.IS_WEEKLY) 
-          && steps.pytest.outcome == 'success' 
-          && fromJSON(env.FAILED_TESTS_ARE_FLAKY) 
+          fromJSON(env.IS_WEEKLY)
+          && steps.pytest.outcome == 'success'
+          && fromJSON(env.FAILED_TESTS_ARE_FLAKY)
          && !cancelled()
        with:
          name: failed_flaky_${{ matrix.python_version }}
@@ -771,17 +779,17 @@ jobs:
      - name: Warn PR with flaky tests (regular)
        uses: marocchino/sticky-pull-request-comment@331f8f5b4215f0445d3c07b4967662a32a2d3e31
        if: |
-          fromJSON(env.IS_PR) 
-          && steps.pytest.outcome == 'success' 
-          && fromJSON(env.FAILED_TESTS_ARE_FLAKY) 
+          fromJSON(env.IS_PR)
+          && steps.pytest.outcome == 'success'
+          && fromJSON(env.FAILED_TESTS_ARE_FLAKY)
          && !cancelled()
        with:
          header: flaky-test
          recreate: true
          path: failed_tests_comment_${{ matrix.python_version }}.txt

-      # If pytest step has been skipped but some changes has been detected in test files, 
-      # meaning there was no other changed impacting our testing suite, we only need to run these 
+      # If the pytest step has been skipped but some changes have been detected in test files,
+      # meaning no other change impacted our testing suite, we only need to run these
      # modified tests
      # Note that if pytest utils or test data are changed, the pytest step should have been
      # triggered instead
@@ -792,29 +800,29 @@ jobs:
          && steps.changed-files-in-pr.outcome == 'success'
          && steps.pytest.outcome == 'skipped'
          && steps.changed-files-in-pr.outputs.tests_any_changed == 'true'
-          && steps.conformance.outcome == 'success' 
+          && steps.conformance.outcome == 'success'
          && !cancelled()
        run: |
          for file in ${{ steps.changed-files-in-pr.outputs.tests_all_changed_files }}; do
            make pytest_one TEST="$file"
          done

-      # Run Pytest on all of our tests (except flaky ones) using PyPI's local wheel in the weekly 
+      # Run Pytest on all of our tests (except flaky ones) using the local PyPI wheel in the weekly
      # CI or during the release process
      - name: PyTest (no flaky) with PyPI local wheel of Concrete ML (weekly, release)
        if: |
-          (fromJSON(env.IS_WEEKLY) || fromJSON(env.IS_RELEASE)) 
-          && steps.conformance.outcome == 'success' 
+          (fromJSON(env.IS_WEEKLY) || fromJSON(env.IS_RELEASE))
+          && steps.conformance.outcome == 'success'
          && !cancelled()
        run: |
          make pytest_pypi_wheel_cml_no_flaky

-      # Run Pytest on all of our tests (except flaky ones) using Concrete ML's latest version 
+      # Run Pytest on all of our tests (except flaky ones) using Concrete ML's latest version
      # available on PyPI after publishing a release
      - name: PyTest (no flaky) with PyPI (published release)
        if: |
          fromJSON(env.IS_PUBLISHED_RELEASE)
-          && steps.conformance.outcome == 'success' 
+          && steps.conformance.outcome == 'success'
          && !cancelled()
        run: |
          PROJECT_VERSION="$(poetry version --short)"
@@ -825,7 +833,7 @@ jobs:
      - name: Test coverage (regular, weekly)
        id: coverage
        if: |
-          fromJSON(env.IS_REF_BUILD) 
+          fromJSON(env.IS_REF_BUILD)
          && steps.pytest.outcome != 'skipped'
          && !cancelled()
        run: |
@@ -850,9 +858,9 @@ jobs:
        if: |
          (
            (
-              steps.changed-files-in-pr.outcome == 'success' 
+              steps.changed-files-in-pr.outcome == 'success'
              && (
-                steps.changed-files-in-pr.outputs.src_any_changed == 'true' 
+                steps.changed-files-in-pr.outputs.src_any_changed == 'true'
                || steps.changed-files-in-pr.outputs.codeblocks_any_changed == 'true'
                || steps.changed-files-in-pr.outputs.dependencies_any_changed == 'true'
                || steps.changed-files-in-pr.outputs.makefile_any_changed == 'true'
@@ -860,7 +868,7 @@ jobs:
              )
            )
            || fromJSON(env.IS_WORKFLOW_DISPATCH)
          )
-          && steps.conformance.outcome == 'success' 
+          && steps.conformance.outcome == 'success'
          && !cancelled()
        run: |
          make pytest_codeblocks
@@ -868,18 +876,18 @@ jobs:
      # Run Pytest on all codeblocks on a weekly basis or while releasing
      - name: PyTest CodeBlocks with PyPI local wheel of Concrete ML (weekly, release)
        if: |
-          (fromJSON(env.IS_WEEKLY) || fromJSON(env.IS_RELEASE)) 
-          && steps.conformance.outcome == 'success' 
+          (fromJSON(env.IS_WEEKLY) || fromJSON(env.IS_RELEASE))
+          && steps.conformance.outcome == 'success'
          && !cancelled()
        run: |
-          make pytest_codeblocks_pypi_wheel_cml 
+          make pytest_codeblocks_pypi_wheel_cml

-      # Run Pytest on all codeblocks using Concrete ML's latest version available on PyPI after 
+      # Run Pytest on all codeblocks using Concrete ML's latest version available on PyPI after
      # publishing a release
      - name: PyTest CodeBlocks with PyPI (published release)
        if: |
-          fromJSON(env.IS_PUBLISHED_RELEASE) 
-          && steps.conformance.outcome == 'success' 
+          fromJSON(env.IS_PUBLISHED_RELEASE)
+          && steps.conformance.outcome == 'success'
          && !cancelled()
        run: |
          PROJECT_VERSION="$(poetry version --short)"
@@ -889,9 +897,9 @@ jobs:
      # Run Pytest on all notebooks on a weekly basis
      # Note: some notebooks need specific data stored in LFS
      - name: PyTest Notebooks (weekly)
-        if: | 
-          fromJSON(env.IS_WEEKLY) 
-          && steps.conformance.outcome == 'success' 
+        if: |
+          fromJSON(env.IS_WEEKLY)
+          && steps.conformance.outcome == 'success'
          && !cancelled()
        run: |
          git lfs pull --include "docs/advanced_examples/data/**" --exclude ""
@@ -902,12 +910,12 @@ jobs:
        if: ${{ steps.conformance.outcome == 'success' && !cancelled() }}
        run: |
          make fast_sanity_check
- 
+
      # Check installation with sync_env
      - name: Check installation with sync_env and python ${{ matrix.python_version }} (weekly, release)
        if: |
-          (fromJSON(env.IS_WEEKLY) || fromJSON(env.IS_RELEASE)) 
-          && steps.conformance.outcome == 'success' 
+          (fromJSON(env.IS_WEEKLY) || fromJSON(env.IS_RELEASE))
+          && steps.conformance.outcome == 'success'
          && !cancelled()
        run: |
          ./script/make_utils/check_installation_with_all_python.sh --version ${{ matrix.python_version }} --sync_env
@@ -915,8 +923,8 @@ jobs:
      # Check installation with pip
      - name: Check installation with pip and python ${{ matrix.python_version }} (weekly, release)
        if: |
-          (fromJSON(env.IS_WEEKLY) || fromJSON(env.IS_RELEASE)) 
-          && steps.conformance.outcome == 'success' 
+          (fromJSON(env.IS_WEEKLY) || fromJSON(env.IS_RELEASE))
+          && steps.conformance.outcome == 'success'
          && !cancelled()
        run: |
          ./script/make_utils/check_installation_with_all_python.sh --version ${{ matrix.python_version }} --pip
@@ -924,8 +932,8 @@ jobs:
      # Check installation with wheel
      - name: Check installation with wheel and python ${{ matrix.python_version }} (weekly, release)
        if: |
-          (fromJSON(env.IS_WEEKLY) || fromJSON(env.IS_RELEASE)) 
-          && steps.conformance.outcome == 'success' 
+          (fromJSON(env.IS_WEEKLY) || fromJSON(env.IS_RELEASE))
+          && steps.conformance.outcome == 'success'
          && !cancelled()
        run: |
          ./script/make_utils/check_installation_with_all_python.sh --version ${{ matrix.python_version }} --wheel
@@ -933,18 +941,30 @@ jobs:
      # Check installation with git clone
      - name: Check installation with clone and python ${{ matrix.python_version }} (weekly, release)
        if: |
-          (fromJSON(env.IS_WEEKLY) || fromJSON(env.IS_RELEASE)) 
-          && steps.conformance.outcome == 'success' 
+          (fromJSON(env.IS_WEEKLY) || fromJSON(env.IS_RELEASE))
+          && steps.conformance.outcome == 'success'
          && !cancelled()
        run: |
          ./script/make_utils/check_installation_with_all_python.sh --version ${{ matrix.python_version }} --clone

+  provenance:
+    needs: [build-linux]
+    permissions:
+      actions: read
+      contents: write
+      id-token: write # Needed to access the workflow's OIDC identity
+    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.0.0 # Not pinned by commit on purpose,
+    # see https://github.com/slsa-framework/slsa-github-generator/blob/main/README.md#referencing-slsa-builders-and-generators
+    if: ${{ needs.build-linux.outputs.hashes != '' }}
+    with:
+      base64-subjects: "${{ needs.build-linux.outputs.hashes }}"
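The reusable workflow turns the base64-encoded subjects into a signed SLSA provenance attestation. Once published, a consumer could verify a downloaded wheel against it; a sketch using slsa-verifier, with illustrative file names (not part of this PR):

```yaml
      - name: Verify provenance (illustrative)
        run: |
          slsa-verifier verify-artifact concrete_ml-*.whl \
            --provenance-path multiple.intoto.jsonl \
            --source-uri github.com/zama-ai/concrete-ml
```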
+
  # This is to manage build matrices and have a single status point for PRs
-  # This can be updated to take macOS into account but it is impractical because of long builds 
+  # This could be updated to take macOS into account, but it is impractical because of long builds
  # and therefore expensive macOS testing
  linux-build-status:
    name: Build Status (Linux)
-    needs: [build-linux]
+    needs: [build-linux, provenance]
    runs-on: ubuntu-20.04
    timeout-minutes: 2
    if: success() || failure()
@@ -1027,17 +1047,17 @@ jobs:
        run: |
          echo "::add-mask::${{ secrets.INTERNAL_PYPI_URL_FOR_MASK }}"
          echo "::add-mask::${{ secrets.INTERNAL_REPO_URL_FOR_MASK }}"
- 
+
      # By default, `git clone` downloads all LFS files, which we want to avoid in CIs
      - name: Disable LFS download by default
        run: |
          git lfs install --skip-smudge
- 
+
      # Checkout the code
      - name: Checkout Code
        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
- 
-      # Pull necessary LFS files (and thus avoid downloading files stored for benchmarks, use cases, ...) 
+
+      # Pull necessary LFS files (and thus avoid downloading files stored for benchmarks, use cases, ...)
      - name: Pull LFS files
        run: |
          git lfs pull --include "tests/data/**, src/concrete/ml/pandas/_client_server_files/**" --exclude ""
@@ -1082,7 +1102,7 @@ jobs:
      # macOS builds are already long, so we decide not to use --weekly on them; this could be
      # changed. Also note that, for macOS, due to unexpected issues with GitHub, we have a
      # slightly different way of launching pytest
-      # Add support for re-running flaky tests on macOS (intel) CI 
+      # Add support for re-running flaky tests on macOS (intel) CI
      # FIXME: https://github.com/zama-ai/concrete-ml-internal/issues/4428
      - name: PyTest Source Code
        run: |
@@ -1109,16 +1129,16 @@ jobs:
  # - when pushing to main
  # - when pushing to a release branch
  # - when running weekly tests
-  # In these cases, we want to send the report whenever one of the step was triggered, which is 
+  # In these cases, we want to send the report whenever one of the steps was triggered, which is
  # basically when the `matrix-preparation` job has not been skipped
-  # Side note: environmental variables cannot be used for jobs conditions, so we need to determine 
+  # Side note: environment variables cannot be used in job conditions, so we need to determine
  # whether the job should run in a previous job and store it in its output
  slack-report:
    name: Slack report
    runs-on: ubuntu-20.04
    if: |
-      always() 
-      && needs.matrix-preparation.result != 'skipped' 
+      always()
+      && needs.matrix-preparation.result != 'skipped'
      && fromJSON(needs.decide-slack-report.outputs.send_slack_report)
    timeout-minutes: 2
    needs:
@@ -1153,16 +1173,16 @@ jobs:
          if [[ "${{ env.JOB_STATUS }}" == "success" ]]; then
            TITLE_STATUS="passed ✅"
-          elif [[ "${{ env.JOB_STATUS }}" == "cancelled" ]]; then 
+          elif [[ "${{ env.JOB_STATUS }}" == "cancelled" ]]; then
            TITLE_STATUS="cancelled :black_square_for_stop:"
-          elif [[ "${{ env.JOB_STATUS }}" == "skipped" ]]; then 
+          elif [[ "${{ env.JOB_STATUS }}" == "skipped" ]]; then
            TITLE_STATUS="skipped :fast_forward:"
          else
            TITLE_STATUS="failed ❌"
          fi

          echo "SLACK_TITLE=${TITLE_START} ${TITLE_STATUS}" >> "$GITHUB_ENV"
- 
+
      # Retrieve the list of flaky tests that have been re-run, if there were any
      # Enable 'merge-multiple' to download all files in the root directory
      - name: Download artifacts
@@ -1171,12 +1191,12 @@ jobs:
          merge-multiple: true
          pattern: failed_flaky_*

-      # Add support for re-running flaky tests on macOS (intel) CI 
+      # Add support for re-running flaky tests on macOS (intel) CI
      # FIXME: https://github.com/zama-ai/concrete-ml-internal/issues/4428
      - name: Set message body
        run: |
          {
            echo "SLACK_BODY<<EOF"
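The 'Set message body' step is truncated in this excerpt. Exporting a multi-line value goes through `$GITHUB_ENV` with a heredoc delimiter; the original presumably follows this pattern (the body content here is illustrative):

```yaml
        run: |
          {
            echo "SLACK_BODY<<EOF"
            cat failed_tests_list.txt
            echo "EOF"
          } >> "$GITHUB_ENV"
```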
diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
          echo "PROJECT_VERSION=${PROJECT_VERSION}" >> "$GITHUB_ENV"
          echo "IS_RC=${IS_RC}" >> "$GITHUB_ENV"
@@ -182,7 +182,7 @@ jobs:
          echo "GIT_TAG=${GIT_TAG}" >> "$GITHUB_ENV"
          echo "RELEASE_BRANCH_NAME=${RELEASE_BRANCH_NAME}" >> "$GITHUB_ENV"

-          # Store project version, tag version, branch name and if they represent a release 
+          # Store the project version, tag version, branch name and whether they represent a release
          # candidate as job outputs in order to be able to use them in following jobs
          echo "project_version=${PROJECT_VERSION}" >> "$GITHUB_OUTPUT"
          echo "is_rc=${IS_RC}" >> "$GITHUB_OUTPUT"
@@ -190,7 +190,7 @@ jobs:
          echo "git_tag=${GIT_TAG}" >> "$GITHUB_OUTPUT"
          echo "release_branch_name=${RELEASE_BRANCH_NAME}" >> "$GITHUB_OUTPUT"

-      # Make sure that the workflow has been triggered from a release branch if this is a patch 
+      # Make sure that the workflow has been triggered from a release branch if this is a patch
      # release, or from main otherwise. If not, the release preparation process is stopped
      # Additionally, for patch releases, if the release branch name does not match the current
      # release version, the release preparation process is stopped as well
@@ -217,21 +217,21 @@ jobs:
          fi

      # Make sure that the tag related to the current version does not already exist in the
-      # repository. Otherwise, the version has probably not been updated properly and the release 
+      # repository. Otherwise, the version has probably not been updated properly and the release
      # process is thus stopped
      - name: Check tag does not exist remotely
        run: |
          ./script/actions_utils/check_tag_not_remote.sh --tag_name "${{ env.GIT_TAG }}"

-      # Make sure that the branch related to the current version does not already exist in 
-      # the repository if this is a non-rc and non-patch release. Otherwise, the version has 
+      # Make sure that the branch related to the current version does not already exist in
+      # the repository if this is a non-rc and non-patch release. Otherwise, the version has
      # probably not been updated properly and the release process is thus stopped
      - name: Check release branch does not exist remotely
        if: env.IS_RC == 'false' && env.IS_PATCH == 'false'
        run: |
          ./script/actions_utils/check_branch_not_remote.sh --branch_name "${{ env.RELEASE_BRANCH_NAME }}"

-  # The caller workflow's job (here 'release-tests') does not need to run on the current runner as 
+  # The caller workflow's job (here 'release-tests') does not need to run on the current runner, as
  # the reusable workflow (here 'continuous-integration.yaml') uses its own runner
  release-tests:
    name: Run tests
@@ -286,11 +286,11 @@ jobs:
          github_token: ${{ secrets.BOT_TOKEN }}
          branch: ${{ env.TARGET_BRANCH_NAME }}
          tags: true
- 
+
  # This action creates docker and pypi images directly on the AWS EC2 instance
  # The 'PRIVATE_RELEASE_IMAGE_BASE' variable is kept here in case Concrete-ML starts to publish
-  # private nightly releases one day. Currently, release candidates and actual releases are all 
-  # done through the 'PUBLIC_RELEASE_IMAGE_BASE' image. The private image is also used to list all 
+  # private nightly releases one day. Currently, release candidates and actual releases are all
+  # done through the 'PUBLIC_RELEASE_IMAGE_BASE' image. The private image is also used to list all
  # tags easily
  release-package:
    name: Release package and artifacts
@@ -309,7 +309,7 @@ jobs:
      GIT_TAG: ${{ needs.release-checks.outputs.git_tag }}
      PROJECT_VERSION: ${{ needs.release-checks.outputs.project_version }}

-    # Jobs are separated runners, we therefore need to install dependencies again in order to 
+    # Jobs run on separate runners; we therefore need to install dependencies again in order to
    # pursue (without using cache or upload/download them as artifacts)
    steps:
      # Mask internal URLs if logged
@@ -443,6 +443,14 @@ jobs:
          name: py3-wheel
          path: ${{ env.ARTIFACTS_PACKAGED_DIR }}/

+      - name: Download provenance file
+        if: ${{ success() && !cancelled() }}
+        id: download-provenance
+        uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427
+        with:
+          pattern: '*.intoto.jsonl'
+          path: ${{ env.ARTIFACTS_PACKAGED_DIR }}/
+
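The downloaded attestation is a DSSE envelope whose payload is a base64-encoded in-toto statement; the SLSA subjects can be inspected by decoding it. A sketch, with an illustrative file name and assuming jq is available:

```yaml
      - name: Inspect provenance subjects (illustrative)
        run: |
          jq -r '.payload' multiple.intoto.jsonl | base64 -d | jq '.subject'
```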
      - name: Copy wheel to docker build context
        run: |
          mkdir -p ./pkg
@@ -583,7 +591,7 @@ jobs:
            -r pypi "${{ env.ARTIFACTS_PACKAGED_DIR }}"/*.whl

      # This step is kept in case Concrete ML starts to publish private nightly releases one day. For now,
-      # since release candidates and actual releases are public, we don't need to publish anything 
+      # since release candidates and actual releases are public, we don't need to publish anything
      # to the private internal repo
      - name: Push package to Internal PyPi
        if: false
@@ -648,7 +656,7 @@ jobs:
        id: get-release-link
        run: |
          echo "RELEASE_URL=${{ steps.create-release.outputs.url }}" >> $GITHUB_ENV
- 
+
  # Close the AWS EC2 instance
  stop-runner-linux:
@@ -713,7 +721,7 @@ jobs:
          SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
          SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
          SLACK_COLOR: ${{ env.JOB_STATUS || 'failure' }}
-          SLACK_MESSAGE: "Creating release for ${{ env.GIT_TAG }} finished with status\ 
+          SLACK_MESSAGE: "Creating release for ${{ env.GIT_TAG }} finished with status\
            ${{ env.JOB_STATUS || 'failure' }} (${{ env.ACTION_RUN_URL }})\n\
            - start-runner-linux: ${{ needs.start-runner-linux.result || 'Did not run.'}}\n\n\
            - release-checks: ${{ needs.release-checks.result || 'Did not run.' }}\n\n\
diff --git a/README.md b/README.md
index 063c284f1..ce4d139a9 100644
--- a/README.md
+++ b/README.md
@@ -17,6 +17,7 @@
+  SLSA 3

 ## About
@@ -102,7 +103,7 @@ pip install concrete-ml

 *Find more detailed installation instructions in [this part of the documentation](https://docs.zama.ai/concrete-ml/getting-started/pip_installing)*

-  ↑ Back to top 
+  ↑ Back to top

 ### A simple example
@@ -129,7 +130,7 @@ model.fit(X_train, y_train)

 # We can simulate the predictions in the clear
 y_pred_clear = model.predict(X_test)

-# We then compile on a representative set 
+# We then compile on a representative set
 model.compile(X_train)

 # Finally, we run the inference on encrypted inputs!
@@ -179,7 +180,7 @@ print("Probability with encrypt/run/decrypt calls: ", y0)

 Concrete ML built-in models have APIs that are almost identical to their scikit-learn counterparts. It is also possible to convert PyTorch networks to FHE with the Concrete ML conversion APIs. Please refer to the [linear models](docs/built-in-models/linear.md), [tree-based models](docs/built-in-models/tree.md) and [neural networks](docs/built-in-models/neural-networks.md) documentation for more examples, showing the scikit-learn-like API of the built-in models.

-  ↑ Back to top 
+  ↑ Back to top

 ## Resources
@@ -224,7 +225,7 @@ Concrete ML built-in models have APIs that are almost identical to their scikit-

 Full, comprehensive documentation is available here: [https://docs.zama.ai/concrete-ml](https://docs.zama.ai/concrete-ml).

-  ↑ Back to top 
+  ↑ Back to top

 ## Working with Concrete ML
@@ -272,7 +273,7 @@ This software is distributed under the **BSD-3-Clause-Clear** license. Read [thi

 > We are open to collaborating and advancing the FHE space with our partners. If you have specific needs, please email us at hello@zama.ai.

-  ↑ Back to top 
+  ↑ Back to top

 ## Support
@@ -288,5 +289,5 @@ This software is distributed under the **BSD-3-Clause-Clear** license. Read [thi

 🌟 If you find this project helpful or interesting, please consider giving it a star on GitHub! Your support helps to grow the community and motivates further development.

-  ↑ Back to top 
+  ↑ Back to top