# Workflow file for Benchmarks run #80
name: Benchmarks
on:
  schedule:
    # Mon-Fri at 04:39 UTC, see https://crontab.guru/
    - cron: "39 4 * * 1-5"
  workflow_dispatch:
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
defaults:
  # Required shell entrypoint to have properly activated conda environments
  run:
    shell: bash -l {0}
jobs:
  tests:
    name: ${{ matrix.name_prefix }} ${{ matrix.os }} py${{ matrix.python-version }}
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os: ["ubuntu-latest"]
        python-version: ["3.11"]
        extra-env: [""]
        cubed-config: [tests/configs/local_single-threaded.yaml]
        lithops-config: [""]
        name_prefix: [single-threaded]
        include:
          - os: "ubuntu-latest"
            python-version: "3.11"
            extra-env: ci/environment-lithops-aws.yml
            cubed-config: tests/configs/lithops_aws.yaml
            lithops-config: .github/workflows/.lithops_config_aws
            name_prefix: lithops-aws
          - os: "ubuntu-latest"
            python-version: "3.11"
            extra-env: ci/environment-lithops-gcp.yml
            cubed-config: tests/configs/lithops_gcp.yaml
            lithops-config: .github/workflows/.lithops_config_gcp
            name_prefix: lithops-gcp
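    # Each cubed-config entry points at a YAML file that cubed reads via the
    # CUBED_CONFIG environment variable set in the benchmark step below. A
    # minimal sketch of what such a file typically contains (illustrative
    # values, not the actual file):
    #
    #   spec:
    #     work_dir: "s3://example-bucket/cubed"  # scratch space for intermediate data
    #     allowed_mem: "2GB"                     # per-task memory budget
    #     executor_name: "lithops"               # "single-threaded" for the local config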
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Copy lithops configuration templates
        run: |
          cp $GITHUB_WORKSPACE/.github/workflows/.lithops_config_aws.template $GITHUB_WORKSPACE/.github/workflows/.lithops_config_aws
          cp $GITHUB_WORKSPACE/.github/workflows/.lithops_config_gcp.template $GITHUB_WORKSPACE/.github/workflows/.lithops_config_gcp
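      # The templates are committed without credentials; the substitution steps
      # below fill in the dotted keys. A hedged sketch of the AWS template's
      # shape (the real file may differ):
      #
      #   lithops:
      #     backend: aws_lambda
      #     storage: aws_s3
      #   aws:
      #     access_key_id: ""      # injected by variable-substitution
      #     secret_access_key: ""  # injected by variable-substitution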
      - name: Google auth
        id: 'auth'
        uses: 'google-github-actions/auth@v2'
        with:
          credentials_json: '${{ secrets.GOOGLE_CREDENTIALS }}'
          create_credentials_file: true
      - name: Configure lithops AWS
        uses: microsoft/variable-substitution@v1
        with:
          files: ${{ github.workspace }}/.github/workflows/.lithops_config_aws
        env:
          aws.access_key_id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws.secret_access_key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
      - name: Configure lithops GCP
        uses: microsoft/variable-substitution@v1
        with:
          files: ${{ github.workspace }}/.github/workflows/.lithops_config_gcp
        env:
          gcp.credentials_path: ${{ steps.auth.outputs.credentials_file_path }}
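      # As above, substitution assumes the GCP template already contains the
      # matching dotted path, e.g. (illustrative):
      #
      #   gcp:
      #     credentials_path: ""  # replaced with the credentials file generated by the auth step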
      - name: Set up environment
        uses: conda-incubator/setup-miniconda@v2
        with:
          miniforge-variant: Mambaforge
          use-mamba: true
          condarc-file: ci/condarc
          python-version: ${{ matrix.python-version }}
          environment-file: ci/environment.yml
      - name: Add extra packages to environment
        if: ${{ matrix.extra-env != '' }}
        run: mamba env update --file ${{ matrix.extra-env }}
      - name: Add test dependencies
        run: mamba env update --file ci/environment-test.yml
      - name: Dump environment
        run: |
          # For debugging; also saved to a file so it can be uploaded as an artifact below
          echo -e "--\n--Conda Environment (re-create this with \`conda env create --name <name> -f <output_file>\`)\n--"
          mamba env export | grep -E -v '^prefix:.*$' | tee mamba_env_export.yml
      - name: Setup Graphviz
        uses: ts-graphviz/setup-graphviz@v2
      - name: Run benchmarks
        env:
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          CUBED_CONFIG: ${{ matrix.cubed-config }}
          DB_NAME: ${{ matrix.name_prefix }}-${{ matrix.os }}-py${{ matrix.python-version }}.db
          LITHOPS_CONFIG_FILE: ${{ github.workspace }}/${{ matrix.lithops-config }}
          NAME_PREFIX: ${{ matrix.name_prefix }}
        run: |
          pytest --benchmark --basetemp=pytest-temp
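      # To reproduce the single-threaded variant locally (assuming the conda
      # environments above are installed), something like:
      #
      #   CUBED_CONFIG=tests/configs/local_single-threaded.yaml pytest --benchmark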
      - name: Upload benchmark results
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: ${{ matrix.name_prefix }}-${{ matrix.os }}-py${{ matrix.python-version }}
          path: |
            ${{ matrix.name_prefix }}-${{ matrix.os }}-py${{ matrix.python-version }}.db
            history/
            pytest-temp/
            mamba_env_export.yml
  process-results:
    needs: tests
    name: Combine separate benchmark results
    if: always() && github.repository == 'cubed-dev/cubed-benchmarks'
    runs-on: ubuntu-latest
    concurrency:
      # Fairly strict concurrency rule to avoid stepping on the benchmark db.
      group: process-benchmarks
      cancel-in-progress: false
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Install Python
        uses: actions/setup-python@v4
        with:
          python-version: "3.11"
      - name: Install dependencies
        run: pip install alembic
      - name: Download artifacts
        uses: actions/download-artifact@v4
        with:
          path: benchmarks
      - name: Download benchmark db
        env:
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          AWS_DEFAULT_REGION: us-east-2 # boto requires a default region to be set
          DB_NAME: benchmark.db
        run: |
          # Tolerate a missing db (e.g. on the very first run)
          aws s3 cp s3://cubed-runtime-ci/benchmarks/$DB_NAME . || true
      - name: Combine benchmarks
        run: |
          ls -lhR benchmarks
          bash ci/scripts/combine-dbs.sh
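      # ci/scripts/combine-dbs.sh (not shown here) merges the per-job sqlite
      # files downloaded into benchmarks/ with the long-running benchmark.db
      # fetched above; alembic is installed earlier so schema migrations can be
      # applied first, e.g. (a sketch, not the actual script):
      #
      #   alembic upgrade head  # bring benchmark.db up to the current schema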
      - name: Upload benchmark db
        if: always() && github.ref == 'refs/heads/main' && github.repository == 'cubed-dev/cubed-benchmarks'
        env:
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          AWS_DEFAULT_REGION: us-east-2 # boto requires a default region to be set
          DB_NAME: benchmark.db
        run: |
          aws s3 cp $DB_NAME s3://cubed-runtime-ci/benchmarks/
      - name: Upload benchmark results as artifact
        uses: actions/upload-artifact@v4
        with:
          name: benchmark
          path: benchmark.db