diff --git a/.codespellrc b/.codespellrc
index 18147b67cf15..5c64576d148d 100644
--- a/.codespellrc
+++ b/.codespellrc
@@ -3,6 +3,6 @@ skip = .git,*.pdf,*.svg,versioneer.py,package-lock.json,_vendor,*.css,.codespell
# from https://github.com/PrefectHQ/prefect/pull/10813#issuecomment-1732676130
ignore-regex = .*lazy=\"selectin\"|.*e import Bloc$|America/Nome
-ignore-words-list = selectin,aci,wqs,aks,ines,dependant,fsspec,automations,nmme
+ignore-words-list = selectin,aci,wqs,aks,ines,dependant,fsspec,automations,nmme,afterall
check-hidden = true
\ No newline at end of file
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 86f4dda0a7ac..a7e2ffe5e921 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -7,5 +7,8 @@
# documentation
/docs @discdiver @cicdw @desertaxle @zzstoatzz
# imports
-/src/prefect/__init__.py @aaazzam @chrisguidry @cicdw @desertaxle @zzstoatzz
-/src/prefect/main.py @aaazzam @chrisguidry @cicdw @desertaxle @zzstoatzz
+/src/prefect/__init__.py @aaazzam @chrisguidry @cicdw @desertaxle @zzstoatzz
+/src/prefect/main.py @aaazzam @chrisguidry @cicdw @desertaxle @zzstoatzz
+
+# UI Replatform
+/ui-v2 @aaazzam @cicdw @desertaxle @zzstoatzz
diff --git a/.github/release.yml b/.github/release.yml
index a0a2c06b504b..8d07814866fb 100644
--- a/.github/release.yml
+++ b/.github/release.yml
@@ -26,6 +26,7 @@ changelog:
- title: Development & Tidiness 🧹
labels:
- development
+ - docs
- title: Uncategorized
labels:
- "*"
\ No newline at end of file
diff --git a/.github/workflows/codspeed-benchmarks.yaml b/.github/workflows/codspeed-benchmarks.yaml
index 63693197d49f..b8b6a67edc47 100644
--- a/.github/workflows/codspeed-benchmarks.yaml
+++ b/.github/workflows/codspeed-benchmarks.yaml
@@ -61,7 +61,8 @@ jobs:
- name: Install packages
run: |
python -m pip install -U uv
- uv pip install --upgrade --system .[dev]
+ uv pip install --upgrade --system .[dev] pytest-codspeed
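+ # pytest-codspeed ships its own "benchmark" fixture; pytest-benchmark is removed below so the two plugins do not collide (assumed rationale)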
+ uv pip uninstall --system pytest-benchmark
- name: Start server
run: |
diff --git a/.github/workflows/integration-tests.yaml b/.github/workflows/integration-tests.yaml
index 1a3ff9971ced..ab3c069447ab 100644
--- a/.github/workflows/integration-tests.yaml
+++ b/.github/workflows/integration-tests.yaml
@@ -1,3 +1,6 @@
+# TODO: Replace `wait-for-server` with dedicated command
+# https://github.com/PrefectHQ/prefect/issues/6990
+
name: Integration tests
on:
pull_request:
@@ -85,9 +88,6 @@ jobs:
./scripts/wait-for-server.py
- # TODO: Replace `wait-for-server` with dedicated command
- # https://github.com/PrefectHQ/prefect/issues/6990
-
- name: Start server
if: ${{ matrix.server-version.version == 'main' }}
env:
@@ -98,9 +98,6 @@ jobs:
./scripts/wait-for-server.py
- # TODO: Replace `wait-for-server` with dedicated command
- # https://github.com/PrefectHQ/prefect/issues/6990
-
- name: Run integration flows
env:
PREFECT_API_URL: http://127.0.0.1:4200/api
@@ -113,3 +110,35 @@ jobs:
run: |
cat server.log || echo "No logs available"
docker logs prefect-server || echo "No logs available"
+
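+ # SQLITE_VERSION uses SQLite's download encoding: 3240000 == 3.24.0 and 3310100 == 3.31.1.
+ # 3.24.0 is presumably the oldest SQLite release Prefect supports, so both ends are exercised.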
+ sqlite-3-24-0:
+ name: Test SQLite 3.24.0 Compatibility
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
+ - name: Test with SQLite 3.24.0
+ run: >
+ docker build -t prefect-server-old-sqlite \
+ --build-arg SQLITE_VERSION=3240000 \
+ --build-arg SQLITE_YEAR=2018 \
+ -f old-sqlite.Dockerfile . &&
+ docker run prefect-server-old-sqlite sh -c "prefect server database downgrade --yes -r base && prefect server database upgrade --yes"
+
+ sqlite-3-31-1:
+ name: Test SQLite 3.31.1 Compatibility
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
+ - name: Test with SQLite 3.31.1
+ run: >
+ docker build -t prefect-server-new-sqlite \
+ --build-arg SQLITE_VERSION=3310100 \
+ --build-arg SQLITE_YEAR=2020 \
+ -f old-sqlite.Dockerfile . &&
+ docker run prefect-server-new-sqlite sh -c "prefect server database downgrade --yes -r base && prefect server database upgrade --yes"
diff --git a/.github/workflows/issue-bot.yaml b/.github/workflows/issue-bot.yaml
deleted file mode 100644
index 2a3edfa577c9..000000000000
--- a/.github/workflows/issue-bot.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-name: Issue bot
-
-on:
- issues:
- types: [closed, assigned, unassigned, labeled]
-
-jobs:
- remove_label:
- runs-on: ubuntu-latest
- steps:
- - name: Remove status labels on close
- if: github.event.action == 'closed'
- run: gh issue edit --repo prefecthq/prefect ${{ github.event.issue.number }} --remove-label "needs:triage" --remove-label "needs:attention"
- env:
- GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/markdown-tests.yaml b/.github/workflows/markdown-tests.yaml
index c7f7b1c672bd..bdeb596300df 100644
--- a/.github/workflows/markdown-tests.yaml
+++ b/.github/workflows/markdown-tests.yaml
@@ -95,6 +95,7 @@ jobs:
python -m pip install -U uv
uv pip install --upgrade --system -e '.[dev]'
uv pip install --upgrade --system -r requirements-markdown-tests.txt
+ uv pip uninstall --system pytest-benchmark
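+ # assumed: pytest-benchmark is removed for the same plugin-conflict reason as in the codspeed workflow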
- name: Start server
run: |
diff --git a/.github/workflows/nightly-release.yaml b/.github/workflows/nightly-release.yaml
index 1140d547251d..b2dbe112991d 100644
--- a/.github/workflows/nightly-release.yaml
+++ b/.github/workflows/nightly-release.yaml
@@ -19,7 +19,15 @@ jobs:
- name: Get latest tag
id: get_latest_tag
run: |
- latest_tag=$(git tag -l | grep -E '^[0-9]+\.[0-9]+\.[0-9]+(\.dev[0-9]+)?$' | grep -Ev 'rc|alpha|beta|post' | sort -V | tail -n1)
+ # Retrieve the latest tag by:
+ # 1. Listing all tags
+ # 2. Filtering for tags matching the pattern `^[0-9]+\.[0-9]+\.[0-9]+(\.dev[0-9]+)?$`
+ # 3. Filtering out tags containing `rc`, `alpha`, `beta`, or `post`
+ # 4. Replacing `.dev` with `~dev` for sorting purposes
+ # 5. Sorting the tags in version order
+ # 6. Replacing `~dev` with `.dev`
+ # 7. Taking the last tag
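+ # The ~ swap is needed because GNU `sort -V` ranks `~` before the end of a string, so 3.2.0~dev1 sorts before 3.2.0.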
+ latest_tag=$(git tag -l | grep -E '^[0-9]+\.[0-9]+\.[0-9]+(\.dev[0-9]+)?$' | grep -Ev 'rc|alpha|beta|post' | sed 's/\.dev/~dev/' | sort --version-sort | sed 's/~dev/.dev/' | tail -n1)
if [ -z "$latest_tag" ]; then
echo "No matching tags found."
exit 1
diff --git a/.github/workflows/python-tests.yaml b/.github/workflows/python-tests.yaml
index 7cd3e4500161..26d344a44641 100644
--- a/.github/workflows/python-tests.yaml
+++ b/.github/workflows/python-tests.yaml
@@ -159,6 +159,11 @@ jobs:
echo "COVERAGE_FILE=${COVERAGE_FILE}" >> $GITHUB_ENV
echo "artifact_name=coverage-data-${sanitized_test_type}-${{ matrix.python-version }}-${sanitized_database}" >> $GITHUB_OUTPUT
+ - name: Set coverage core
+ if: ${{ matrix.python-version == '3.12' }}
+ run: |
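+ # COVERAGE_CORE=sysmon selects coverage.py's sys.monitoring core (new in Python 3.12), which should cut tracing overhead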
+ echo "COVERAGE_CORE=sysmon" >> $GITHUB_ENV
+
- name: Run tests
run: |
echo "Using COVERAGE_FILE=$COVERAGE_FILE"
@@ -344,6 +349,11 @@ jobs:
echo "COVERAGE_FILE=${COVERAGE_FILE}" >> $GITHUB_ENV
echo "artifact_name=coverage-data-docker-${{ matrix.python-version }}-${sanitized_database}" >> $GITHUB_OUTPUT
+ - name: Set coverage core
+ if: ${{ matrix.python-version == '3.12' }}
+ run: |
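+ # As above: use the low-overhead sys.monitoring coverage core on Python 3.12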
+ echo "COVERAGE_CORE=sysmon" >> $GITHUB_ENV
+
- name: Run tests
run: |
echo "Using COVERAGE_FILE=$COVERAGE_FILE"
diff --git a/.github/workflows/test-windows-unc.yaml b/.github/workflows/test-windows-unc.yaml
new file mode 100644
index 000000000000..3def94b4ec23
--- /dev/null
+++ b/.github/workflows/test-windows-unc.yaml
@@ -0,0 +1,53 @@
+name: Test Windows UNC Paths
+
+on:
+ workflow_dispatch: # Allow manual triggering
+ pull_request:
+ paths:
+ - "src/prefect/utilities/filesystem.py"
+ - "scripts/test_unc_paths.py"
+ - ".github/workflows/test-windows-unc.yaml"
+ - "requirements.txt"
+ - "requirements-client.txt"
+
+jobs:
+ test-unc-paths:
+ runs-on: windows-latest
+
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ persist-credentials: false
+ fetch-depth: 0
+
+ - name: Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.12"
+
+ - name: Install dependencies
+ run: |
+ python -m pip install -U pip
+ pip install -e .
+
+ - name: Create test UNC path and run flow
+ shell: pwsh
+ run: |
+ # Create a test directory
+ New-Item -ItemType Directory -Path "C:\ShareTest" -Force
+
+ # Create network share
+ New-SmbShare -Name "PrefectTest" -Path "C:\ShareTest" -FullAccess "Everyone"
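+ # The share should now be reachable at \\localhost\PrefectTest; the script presumably creates and runs flows from that UNC path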
+
+ # Run the test script from the current directory
+ # This will create and test flows in the UNC path
+ python scripts/test_unc_paths.py
+
+ env:
+ PYTHONPATH: ${{ github.workspace }}
+
+ - name: Cleanup
+ if: always()
+ shell: pwsh
+ run: |
+ Remove-SmbShare -Name "PrefectTest" -Force
diff --git a/.github/workflows/ui-v2-checks.yml b/.github/workflows/ui-v2-checks.yml
new file mode 100644
index 000000000000..8805bc6ec653
--- /dev/null
+++ b/.github/workflows/ui-v2-checks.yml
@@ -0,0 +1,60 @@
+name: UI v2 Checks
+
+on:
+ pull_request:
+ paths:
+ - .github/workflows/ui-v2-checks.yml
+ - ui-v2/**
+ - .nvmrc
+ push:
+ branches:
+ - main
+
+permissions:
+ contents: read
+
+# Limit concurrency by workflow/branch combination.
+#
+# For pull request builds, pushing additional changes to the
+# branch will cancel prior in-progress and pending builds.
+#
+# For builds triggered on a branch push, additional changes
+# will wait for prior builds to complete before starting.
+#
+# https://docs.github.com/en/actions/using-jobs/using-concurrency
+concurrency:
+ group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+ cancel-in-progress: ${{ github.event_name == 'pull_request' }}
+
+jobs:
+ build-ui:
+ name: Build ui
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v4
+
+ - uses: actions/setup-node@v4
+ with:
+ node-version-file: ".nvmrc"
+ cache-dependency-path: "**/package-lock.json"
+
+ - name: Install UI dependencies
+ working-directory: ./ui-v2
+ run: npm ci
+
+ - name: Check formatting
+ working-directory: ./ui-v2
+ run: npm run format:check
+
+ - name: Lint
+ working-directory: ./ui-v2
+ run: npm run lint
+
+ - name: Build UI
+ working-directory: ./ui-v2
+ run: npm run build
+
+ - name: Run tests
+ working-directory: ./ui-v2
+ run: npm run test
diff --git a/.gitignore b/.gitignore
index 3cd4730f4f31..127e8e8c02a0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -65,6 +65,7 @@ dask-worker-space/
!ui/.vscode/
# Prefect files
+prefect.toml
prefect.yaml
# Deployment recipes
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 0aa3831a6c5c..30a626c49756 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -45,3 +45,31 @@ repos:
src/prefect/server/events/.*|
scripts/generate_mintlify_openapi_docs.py
)$
+
+ - repo: local
+ hooks:
+ - id: generate-settings-schema
+ name: Generating Settings Schema
+ language: system
+ entry: uv run --with 'pydantic>=2.9.0' ./scripts/generate_settings_schema.py
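+ # `--with` layers pydantic>=2.9.0 into the ephemeral uv environment in case the project's resolved pin is older (assumption)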
+ pass_filenames: false
+ files: |
+ (?x)^(
+ .pre-commit-config.yaml|
+ src/prefect/settings/models/.*|
+ scripts/generate_settings_schema.py
+ )$
+
+ - repo: local
+ hooks:
+ - id: generate-settings-ref
+ name: Generating Settings Reference
+ language: system
+ entry: uv run --with 'pydantic>=2.9.0' ./scripts/generate_settings_ref.py
+ pass_filenames: false
+ files: |
+ (?x)^(
+ .pre-commit-config.yaml|
+ src/prefect/settings/models/.*|
+ scripts/generate_settings_ref.py
+ )$
diff --git a/README.md b/README.md
index 2a3940e13ee8..697b574d2889 100644
--- a/README.md
+++ b/README.md
@@ -12,8 +12,6 @@
-
-
diff --git a/benches/bench_flows.py b/benches/bench_flows.py
index 9494fc44a9c9..83c196d183d2 100644
--- a/benches/bench_flows.py
+++ b/benches/bench_flows.py
@@ -2,12 +2,16 @@
TODO: Add benches for higher number of tasks; blocked by engine deadlocks in CI.
"""
+from typing import TYPE_CHECKING
+
import anyio
import pytest
-from pytest_benchmark.fixture import BenchmarkFixture
from prefect import flow, task
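+# BenchmarkFixture is imported only for type checking (note the quoted annotations below),
+# so importing this module no longer requires pytest-benchmark at runtime.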
+if TYPE_CHECKING:
+ from pytest_benchmark.fixture import BenchmarkFixture
+
def noop_function():
pass
@@ -17,12 +21,12 @@ async def anoop_function():
pass
-def bench_flow_decorator(benchmark: BenchmarkFixture):
+def bench_flow_decorator(benchmark: "BenchmarkFixture"):
benchmark(flow, noop_function)
@pytest.mark.parametrize("options", [{}, {"timeout_seconds": 10}])
-def bench_flow_call(benchmark: BenchmarkFixture, options):
+def bench_flow_call(benchmark: "BenchmarkFixture", options):
noop_flow = flow(**options)(noop_function)
benchmark(noop_flow)
@@ -35,7 +39,7 @@ def bench_flow_call(benchmark: BenchmarkFixture, options):
@pytest.mark.parametrize("num_tasks", [10, 50, 100])
-def bench_flow_with_submitted_tasks(benchmark: BenchmarkFixture, num_tasks: int):
+def bench_flow_with_submitted_tasks(benchmark: "BenchmarkFixture", num_tasks: int):
test_task = task(noop_function)
@flow
@@ -47,7 +51,7 @@ def benchmark_flow():
@pytest.mark.parametrize("num_tasks", [10, 50, 100, 250])
-def bench_flow_with_called_tasks(benchmark: BenchmarkFixture, num_tasks: int):
+def bench_flow_with_called_tasks(benchmark: "BenchmarkFixture", num_tasks: int):
test_task = task(noop_function)
@flow
@@ -62,7 +66,7 @@ def benchmark_flow():
@pytest.mark.parametrize("num_tasks", [10, 50, 100, 250])
-def bench_async_flow_with_async_tasks(benchmark: BenchmarkFixture, num_tasks: int):
+def bench_async_flow_with_async_tasks(benchmark: "BenchmarkFixture", num_tasks: int):
test_task = task(anoop_function)
@flow
@@ -78,7 +82,7 @@ async def benchmark_flow():
@pytest.mark.parametrize("num_flows", [5, 10, 20])
-def bench_flow_with_subflows(benchmark: BenchmarkFixture, num_flows: int):
+def bench_flow_with_subflows(benchmark: "BenchmarkFixture", num_flows: int):
test_flow = flow(noop_function)
@flow
@@ -91,7 +95,7 @@ def benchmark_flow():
@pytest.mark.parametrize("num_flows", [5, 10, 20])
def bench_async_flow_with_sequential_subflows(
- benchmark: BenchmarkFixture, num_flows: int
+ benchmark: "BenchmarkFixture", num_flows: int
):
test_flow = flow(anoop_function)
@@ -105,7 +109,7 @@ async def benchmark_flow():
@pytest.mark.parametrize("num_flows", [5, 10, 20])
def bench_async_flow_with_concurrent_subflows(
- benchmark: BenchmarkFixture, num_flows: int
+ benchmark: "BenchmarkFixture", num_flows: int
):
test_flow = flow(anoop_function)
diff --git a/benches/bench_import.py b/benches/bench_import.py
index 0398ec523ce0..9d3142fc8729 100644
--- a/benches/bench_import.py
+++ b/benches/bench_import.py
@@ -1,9 +1,12 @@
import importlib
import sys
+from typing import TYPE_CHECKING
import pytest
from prometheus_client import REGISTRY
-from pytest_benchmark.fixture import BenchmarkFixture
+
+if TYPE_CHECKING:
+ from pytest_benchmark.fixture import BenchmarkFixture
def reset_imports():
@@ -21,7 +24,7 @@ def reset_imports():
@pytest.mark.benchmark(group="imports")
-def bench_import_prefect(benchmark: BenchmarkFixture):
+def bench_import_prefect(benchmark: "BenchmarkFixture"):
def import_prefect():
reset_imports()
@@ -32,7 +35,7 @@ def import_prefect():
@pytest.mark.timeout(180)
@pytest.mark.benchmark(group="imports")
-def bench_import_prefect_flow(benchmark: BenchmarkFixture):
+def bench_import_prefect_flow(benchmark: "BenchmarkFixture"):
def import_prefect_flow():
reset_imports()
diff --git a/benches/bench_tasks.py b/benches/bench_tasks.py
index e5317beeecda..88c151a6d584 100644
--- a/benches/bench_tasks.py
+++ b/benches/bench_tasks.py
@@ -1,4 +1,7 @@
-from pytest_benchmark.fixture import BenchmarkFixture
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from pytest_benchmark.fixture import BenchmarkFixture
from prefect import flow, task
@@ -7,11 +10,11 @@ def noop_function():
pass
-def bench_task_decorator(benchmark: BenchmarkFixture):
+def bench_task_decorator(benchmark: "BenchmarkFixture"):
benchmark(task, noop_function)
-def bench_task_call(benchmark: BenchmarkFixture):
+def bench_task_call(benchmark: "BenchmarkFixture"):
noop_task = task(noop_function)
@flow
@@ -21,7 +24,7 @@ def benchmark_flow():
benchmark_flow()
-def bench_task_submit(benchmark: BenchmarkFixture):
+def bench_task_submit(benchmark: "BenchmarkFixture"):
noop_task = task(noop_function)
# The benchmark occurs within the flow to measure _submission_ time without
diff --git a/benches/conftest.py b/benches/conftest.py
deleted file mode 100644
index a8f8313c0128..000000000000
--- a/benches/conftest.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import traceback
-
-import pytest
-import pytest_benchmark.plugin
-
-_handle_saving = pytest_benchmark.session.BenchmarkSession.handle_saving
-
-
-@pytest.hookimpl(hookwrapper=True)
-def handle_saving(*args, **kwargs):
- """
- Patches pytest-benchmark's save handler to avoid raising exceptions on failure.
- An upstream bug causes failures to generate the benchmark JSON when tests fail.
- """
- try:
- return _handle_saving(*args, **kwargs)
- except Exception:
- print("Failed to save benchmark results:")
- traceback.print_exc()
-
-
-pytest_benchmark.session.BenchmarkSession.handle_saving = handle_saving
diff --git a/compat-tests b/compat-tests
index 3c5ec0111e2a..9b5fc44426b6 160000
--- a/compat-tests
+++ b/compat-tests
@@ -1 +1 @@
-Subproject commit 3c5ec0111e2aa7b160f2b21cfd383d19448dfe13
+Subproject commit 9b5fc44426b6a98a05408106fd6b5453ae9a0c76
diff --git a/docs/3.0/api-ref/index.mdx b/docs/3.0/api-ref/index.mdx
deleted file mode 100644
index 683883188a6e..000000000000
--- a/docs/3.0/api-ref/index.mdx
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: API & SDK References
-sidebarTitle: Overview
-description: Explore Prefect's auto-generated API & SDK reference documentation.
----
-
-Prefect auto-generates reference documentation for the following components:
-
-- **[Prefect Python SDK](https://prefect-python-sdk-docs.netlify.app/)**: used to build, test, and execute workflows.
-- **[Prefect REST API](/3.0/api-ref/rest-api)**: used by workflow clients and the Prefect UI for orchestration and data retrieval.
- - Prefect Cloud REST API documentation: https://app.prefect.cloud/api/docs .
- - Self-hosted Prefect server [REST API documentation](/3.0/api-ref/rest-api/server/). Additionally, if self-hosting a Prefect server instance, you can access REST API documentation at the `/docs` endpoint of your [`PREFECT_API_URL`](/3.0/manage/settings-and-profiles/). For example, if you run `prefect server start` with no additional configuration you can find this reference at http://localhost:4200/docs .
diff --git a/docs/3.0/automate/index.mdx b/docs/3.0/automate/index.mdx
deleted file mode 100644
index 3d5f0704d6c7..000000000000
--- a/docs/3.0/automate/index.mdx
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Automate overview
-sidebarTitle: Overview
-description: Learn how to automate workflows with Prefect.
----
-
-This **Automate** section explains how create workflows that run automatically and respond to events.
-
-- [Schedule flow runs](/3.0/automate/add-schedules/) explains how to schedule flow runs for specific times or intervals.
-- [Track activity through events](/3.0/automate/events/events/) discusses how to observe events that create a record of activity.
-- [Trigger actions on events](/3.0/automate/events/automations-triggers/) shows how to use events to trigger actions.
-- [Define custom event triggers](/3.0/automate/events/custom-triggers/) discusses advanced trigger options.
-- [Receive events with webhooks](/3.0/automate/events/webhook-triggers/) explains how to use webhooks to receive events from external systems with Prefect Cloud.
-- [Manage incidents](/3.0/automate/incidents/) show how Prefect Cloud can help identify, resolve, and document issues in mission-critical workflows.
diff --git a/docs/3.0/develop/index.mdx b/docs/3.0/develop/index.mdx
deleted file mode 100644
index 12717ded5db4..000000000000
--- a/docs/3.0/develop/index.mdx
+++ /dev/null
@@ -1,38 +0,0 @@
----
-title: Develop overview
-sidebarTitle: Overview
-description: Learn how to write code with Prefect objects for workflows you can trust.
----
-
-These pages explain how to develop Prefect workflows.
-
-The **Write and run workflows** section introduces and explains Prefect's core concepts.
-
-- [Write and run flows](/3.0/develop/write-flows/) introduces the most central Prefect object, the `flow`.
-- [Write and run tasks](/3.0/develop/write-tasks/) shows you how to create `task` decorated functions that represent a discrete unit of work in a Prefect workflow.
-- [Run tasks in the background](/3.0/develop/task-runners/) explains how to use tasks to execute small, discrete units of work quickly.
-- [Log run activity](/3.0/develop/logging/) shows you how to capture fine-grained information about flows and tasks for monitoring, troubleshooting, and auditing.
-
-The **Configure runtime behavior** section introduces advanced features for interacting with running workflows and automatically taking actions based on workflow state.
-
-- [Configure task caching](/3.0/develop/task-caching/) explores how to cache task results to save money and time.
-- [Write transactions](/3.0/develop/transactions/) shows how to group tasks together to ensure they run atomically.
-- [Pause and resume flow runs](/3.0/develop/pause-resume/) explains how to halt flow runs for input.
-- [Send and receive flow run inputs](/3.0/develop/inputs/) highlights how to run Human-in-the-Loop workflows with Prefect.
-- [Access runtime context](/3.0/develop/runtime-context/) shows how to access information about the current run.
-
-The **Manage state and configuration** section explores how to manage data and configuration in Prefect.
-
-- [Manage results](/3.0/develop/results/) explains how to store and retrieve task results.
-- [Manage states](/3.0/develop/manage-states/) discusses how to take action based on task and flow run states.
-- [Create run artifacts](/3.0/develop/artifacts/) demonstrates how to create artifacts for human consumption in the UI.
-- [Set and get variables](/3.0/develop/variables/) shows how to store and retrieve configuration values from API.
-- [Connect to external systems](/3.0/develop/blocks/) discusses how Prefect blocks can help you connect to external systems.
-
-The **Manage concurrency** section explains how to speed up your workflows and limit activities that can take place concurrently.
-
-- [Run tasks concurrently or in parallel](/3.0/develop/task-runners/) explains how to run tasks concurrently or in parallel with Dask or Ray.
-- [Limit concurrent task runs](/3.0/develop/task-run-limits/) shows how to prevent too many tasks from running simultaneously.
-- [Apply concurrency and rate limits](/3.0/develop/global-concurrency-limits/) demonstrates how to control concurrency and apply rate limits using Prefect's provided utilities.
-
-Finally, [Test workflows](/3.0/develop/test-workflows/) discusses tools for testing workflows.
diff --git a/docs/3.0/get-started/quickstart.mdx b/docs/3.0/get-started/quickstart.mdx
deleted file mode 100644
index a248585231eb..000000000000
--- a/docs/3.0/get-started/quickstart.mdx
+++ /dev/null
@@ -1,243 +0,0 @@
----
-title: Quickstart
-description: Get started with Prefect, the easiest way to orchestrate and observe your data pipelines
----
-
-import Installation from '/snippets/installation.mdx'
-
-Prefect is an orchestration and observability platform that empowers developers to build and scale workflows quickly.
-In this quickstart, you will use Prefect to convert the following Python script to a schedulable, observable, resilient, and deployable workflow in minutes:
-
-```python
-import httpx
-
-def get_repo_info():
- """Fetch statistics about the Prefect repository"""
- url = "https://api.github.com/repos/PrefectHQ/prefect"
- response = httpx.get(url)
- repo = response.json()
- print("PrefectHQ/prefect repository statistics 🤓:")
- print(f"Stars 🌠 : {repo['stargazers_count']}")
-
-if __name__ == "__main__":
- get_repo_info()
-```
-
-## Install Prefect
-
-
-
-
-See [Install Prefect](/3.0/get-started/install/) for more details on installation.
-
-
-## Connect to a Prefect API
-
-Connect to a Prefect API:
-
-
-
-1. Start a local API server:
-
- ```bash
- prefect server start
- ```
-
-1. Open the Prefect dashboard in your browser at [http://localhost:4200](http://localhost:4200).
-
-
-1. Head to [https://app.prefect.cloud/](https://app.prefect.cloud/) and sign in or create a forever-free Prefect Cloud account.
-1. Log in to Prefect Cloud from your development environment:
-
- ```bash
- prefect cloud login
- ```
-
-1. Choose **Log in with a web browser** and click the **Authorize** button in the browser window that opens.
-
-Your CLI is now authenticated with your Prefect Cloud account through a locally stored API key that expires in 30 days.
-
-If you have any issues with browser-based authentication, you can [authenticate with a manually created API key](/3.0/manage/cloud/manage-users/api-keys/) instead.
-
-
-
-## Convert your script to a Prefect workflow
-
-The easiest way to convert a Python script into a workflow is to add a `@flow` decorator to the script's entrypoint.
-This will create a corresponding [flow](/3.0/develop/write-flows/).
-
-Adding `@task` decorators to any functions called by the flow converts them to [tasks](/3.0/develop/write-tasks/).
-Tasks receive metadata about upstream dependencies and the state of those dependencies before they run.
-Prefect will then record these dependencies and states as it orchestrates these tasks.
-
-```python my_gh_workflow.py
-import httpx # an HTTP client library and dependency of Prefect
-from prefect import flow, task
-
-@task(retries=2)
-def get_repo_info(repo_owner: str, repo_name: str):
- """Get info about a repo - will retry twice after failing"""
- url = f"https://api.github.com/repos/{repo_owner}/{repo_name}"
- api_response = httpx.get(url)
- api_response.raise_for_status()
- repo_info = api_response.json()
- return repo_info
-
-@task
-def get_contributors(repo_info: dict):
- """Get contributors for a repo"""
- contributors_url = repo_info["contributors_url"]
- response = httpx.get(contributors_url)
- response.raise_for_status()
- contributors = response.json()
- return contributors
-
-@flow(log_prints=True)
-def log_repo_info(repo_owner: str = "PrefectHQ", repo_name: str = "prefect"):
- """
- Given a GitHub repository, logs the number of stargazers
- and contributors for that repo.
- """
- repo_info = get_repo_info(repo_owner, repo_name)
- print(f"Stars 🌠 : {repo_info['stargazers_count']}")
-
- contributors = get_contributors(repo_info)
- print(f"Number of contributors 👷: {len(contributors)}")
-
-if __name__ == "__main__":
- log_repo_info()
-```
-
-
-The `log_prints=True` argument provided to the `@flow` decorator automatically converts any `print` statements within the function to `INFO` level logs.
-
-
-## Run your flow
-
-You can run your Prefect flow just as you would a Python script:
-
-```bash
-python my_gh_workflow.py
-```
-
-Prefect automatically tracks the state of the flow run and logs the output, which can be viewed directly in the terminal or in the UI.
-
-## Create a work pool
-
-Running a flow locally is a good start, but most use cases require a remote execution environment.
-A [work pool](/3.0/deploy/infrastructure-concepts/work-pools/) is the most common interface for deploying flows to remote infrastructure.
-
-
-
-
-Deploy your flow to a self-hosted Prefect server instance using a `Process` work pool.
-All flow runs submitted to this work pool will run in a local subprocess (the mechanics are similar for other work pool types that run on remote infrastructure).
-
-1. Create a `Process` work pool:
-
- ```bash
- prefect work-pool create --type process my-work-pool
- ```
-
-1. Verify that the work pool exists:
-
- ```bash
- prefect work-pool ls
- ```
-
-1. Start a worker to poll the work pool:
-
- ```bash
- prefect worker start --pool my-work-pool
- ```
-
-
-
-Deploy your flow to Prefect Cloud using a managed work pool.
-
-1. Create a [managed work pool](/3.0/deploy/infrastructure-concepts/work-pools):
-
- ```bash
- prefect work-pool create my-work-pool --type prefect:managed
- ```
-
-1. View your new work pool on the **Work Pools** page of the UI.
-
-
-
-
-You can also choose from other [work pool types](https://docs.prefect.io/concepts/work-pools/#worker-types).
-
-
-## Deploy and schedule your flow
-
-A [deployment](/3.0/deploy/infrastructure-examples/docker/) is used to determine when, where, and how a flow should run.
-Deployments elevate flows to remotely configurable entities that have their own API.
-
-1. Create a deployment in code:
-
- ```bash create_deployment.py
- from prefect import flow
-
- # Source for the code to deploy (here, a GitHub repo)
- SOURCE_REPO="https://github.com/prefecthq/demos.git"
-
- if __name__ == "__main__":
- flow.from_source(
- source=SOURCE_REPO,
- entrypoint="my_gh_workflow.py:repo_info", # Specific flow to run
- ).deploy(
- name="my-first-deployment",
- work_pool_name="my-work-pool", # Work pool target
- cron="0 1 * * *", # Cron schedule (1am every day)
- )
- ```
-
-
- You can store your flow code in nearly any location as long as Prefect can access it.
- See [Where to store your flow code](/3.0/deploy/infrastructure-concepts/store-flow-code) for more details.
-
-
-1. Run the script to create the deployment:
-
- ```bash
- python create_deployment.py
- ```
-
- Check the logs to ensure your deployment was created:
-
- ```bash
- Successfully created/updated all deployments!
- ______________________________________________________
- | Deployments |
- ______________________________________________________
- | Name | Status | Details |
- ______________________________________________________
- | repo-info/my-first-deployment | applied | |
- ______________________________________________________
- ```
-
-1. Schedule a run for the deployment:
-
- ```bash
- prefect deployment run 'repo-info/my-first-deployment'
- ```
-
- Soon you should see the flow run graph and logs on the **Flow Run** page in the UI.
- Logs are also streamed to the terminal.
-
- ![Flow run graph and logs](/3.0/img/ui/qs-flow-run.png)
-
-## Next steps
-
-You've seen how to move from a Python script to a scheduled, observable, remotely orchestrated workflow with Prefect.
-Now consider reading:
-
-* [Write flows](/3.0/develop/write-flows)
-* [Write tasks](/3.0/develop/write-tasks)
-* [Cloud and server](/3.0/manage)
-* [Manage infrastructure with work pools](/3.0/deploy/infrastructure-concepts/work-pools) to learn about running workflows on Kubernetes, Docker, and serverless infrastructure.
-
-
-Need help? [Book a meeting](https://calendly.com/prefect-experts/prefect-product-advocates?utm_campaign=prefect_docs_cloud&utm_content=prefect_docs&utm_medium=docs&utm_source=docs) with a Prefect Product Advocate to get your questions answered.
-
diff --git a/docs/3.0/manage/cloud/index.mdx b/docs/3.0/manage/cloud/index.mdx
deleted file mode 100644
index 9dbd12aca6c1..000000000000
--- a/docs/3.0/manage/cloud/index.mdx
+++ /dev/null
@@ -1,47 +0,0 @@
----
-title: Prefect Cloud overview
-description: Observe and orchestrate workflow applications with the Prefect Cloud platform.
----
-
-Prefect Cloud is a hosted workflow application platform with all the capabilities of the open source Prefect server plus additional features.
-
-- [Connect to Prefect Cloud](/3.0/manage/cloud/connect-to-cloud/) shows how to configure a local execution environment to access Prefect Cloud.
-
-The pages that follow provide detailed information about the features and capabilities of Prefect Cloud:
-
-- [Manage workspaces](/3.0/manage/cloud/workspaces/) shows how to use isolated environments for organizing activity.
-
-The **Manage accounts** subsection covers aspects of Prefect Cloud accounts:
-- [Manage user accounts](/3.0/manage/cloud/manage-users/) explains how to use Prefect Cloud's personal accounts.
-- [Manage service accounts](/3.0/manage/cloud/manage-users/service-accounts/) describes how to configure API access for running workers or executing flow runs on remote infrastructure through API keys that are not associated with a user account.
-- [Manage teams](/3.0/manage/cloud/manage-users/manage-teams/) demonstrates how groups of users can be managed together.
-- [Manage account roles](/3.0/manage/cloud/manage-users/manage-roles/) shows how to use role-based access controls (RBAC): granular permissions to perform certain activities within an account or a workspace. Enterprise plans allow for custom roles with specific permissions.
-- [Manage API keys](/3.0/manage/cloud/manage-users/api-keys/) explains how to grant access to Prefect Cloud from a local execution environment.
-- [Configure single sign-on](/3.0/manage/cloud/manage-users/configure-sso/) describes how (SSO) authentication integration is supported through identity providers with OIDC and SAML. Directory Sync and SCIM provisioning is also available.
-- [Audit Cloud activity](/3.0/manage/cloud/manage-users/audit-logs/) shows how Prefect provides a record of user activities to monitor for security and compliance.
-- [Object access control lists](/3.0/manage/cloud/manage-users/object-access-control-lists/) (ACLs) allow privileged users to restrict deployment and block access to individual users.
-- [Secure access by IP](/3.0/manage/cloud/manage-users/secure-access-by-ip-address/) shows how to restrict access to Prefect Cloud by IP address.
-
-The remaining Prefect Cloud pages in this section cover various aspects of Prefect Cloud:
-
-- [Rate limits and data retention](/3.0/manage/cloud/rate-limits/) discusses rate limits and data retention for Prefect Cloud.
-- [Terraform provider](https://registry.terraform.io/providers/PrefectHQ/prefect/latest/docs/guides/getting-started) links to the Prefect Cloud Terraform provider docs for infrastructure as code.
-- [Troubleshoot Prefect Cloud](/3.0/manage/cloud/troubleshoot-cloud/) contains common solutions for troubleshooting Prefect Cloud.
-
-Pages relevant to Prefect Cloud found elsewhere in the documentation include:
-
-- [Push work pools](/3.0/deploy/infrastructure-examples/serverless/) allow you to run flows on your serverless infrastructure without running a worker.
-- [Managed work pools](/3.0/deploy/infrastructure-examples/managed/) allow you to run flows on Prefect's infrastructure without running a worker.
-- [Webhooks](/3.0/automate/events/webhook-triggers/) .
-- [Incidents](/3.0/automate/incidents/) are a way to track and manage issues that arise during flow runs.
-
-Error summaries (enabled by Marvin AI) distill the error logs of `Failed` and `Crashed` flow runs into actionable information. To enable this feature visit the **Settings** page for your account.
-
-The [Prefect Cloud REST API](/3.0/api-ref/rest-api/) is used to send and receive data for orchestration and monitoring between Prefect clients and Prefect Cloud.
-Prefect interactive Cloud REST API documentation is available at https://app.prefect.cloud/api/docs .
-
-
-## Try Prefect Cloud
-
-To try Prefect Cloud, create a free account at [app.prefect.cloud](https://app.prefect.cloud/).
-Follow the steps in the UI to create and run your first workflow in Prefect Cloud.
diff --git a/docs/3.0/manage/index.mdx b/docs/3.0/manage/index.mdx
deleted file mode 100644
index b728e24bd2f7..000000000000
--- a/docs/3.0/manage/index.mdx
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Cloud and server overview
-sidebarTitle: Overview
-description: Learn how to interact with Prefect Cloud or self-host a Prefect server instance.
----
-
-The **Use Prefect Cloud** section covers how to interact with Prefect Cloud.
-See the [Prefect Cloud overview](/3.0/manage/cloud/) for a discussion of the pages in this section.
-
-
-- [Host Prefect server](/3.0/manage/self-host/) explains how to self-host Prefect server.
-- [Configure settings and profiles](/3.0/manage/self-host/) shows how to configure API interactions through environment variables or Prefect profiles.
-- [Manage run metadata in Python](/3.0/manage/self-host/) demonstrates how to interact with the API in Python through the `PrefectClient`.
\ No newline at end of file
diff --git a/docs/3.0/manage/settings-and-profiles.mdx b/docs/3.0/manage/settings-and-profiles.mdx
deleted file mode 100644
index 027e1e1c45ec..000000000000
--- a/docs/3.0/manage/settings-and-profiles.mdx
+++ /dev/null
@@ -1,175 +0,0 @@
----
-title: Configure settings and profiles
-description: Prefect settings let you customize your workflow environment, including your Cloud and self-hosted server preferences.
----
-
-Many of Prefect's features—especially interactions with the API—require configuration. You can configure settings through the following methods:
-
-- **Environment Variables**: All Prefect settings can be set using environment variables prefixed with `PREFECT_`. Environment variables are useful for temporarily overriding settings or configuring the runtime environment of a workflow. They take precedence over profile settings, making them ideal for adjustments that should only apply to a single session or process. These can also be set in a `.env` file, which will be automatically applied when using `prefect` in that directory.
-
-- **Profiles**: Prefect profiles store sets of settings locally on your machine. When you activate a profile, its settings are applied, allowing you to switch easily between different configurations. For example, you might use one profile for a self-hosted Prefect server and another for Prefect Cloud.
-
-To view all available settings and their active values from the command line, run:
-
-```bash
-prefect config view --show-defaults
-```
-
-These settings are type-validated and you may verify your setup at any time with:
-
-```bash
-prefect config validate
-```
-
-## Manage profiles
-
-Prefect profiles are persisted groups of settings on your local machine.
-By default these settings are stored in a [TOML](https://toml.io/en/) file located at `~/.prefect/profiles.toml`.
-This location can be configured by setting `PREFECT_PROFILES_PATH`.
-
-One and only one profile can be active at any time.
-
-Immediately after installation, the `ephemeral` profile will be used, which only has 1 setting configured:
-```bash
-» docker run -it prefecthq/prefect:3-latest
- ___ ___ ___ ___ ___ ___ _____
- | _ \ _ \ __| __| __/ __|_ _|
- | _/ / _|| _|| _| (__ | |
- |_| |_|_\___|_| |___\___| |_|
-
-
-root@e56e34ab8934:/opt/prefect $ prefect config view
-PREFECT_PROFILE='ephemeral'
-PREFECT_SERVER_ALLOW_EPHEMERAL_MODE='True' (from profile)
-```
-
-
-**What is `PREFECT_SERVER_ALLOW_EPHEMERAL_MODE`?**
-
-This setting allows a Prefect server to be run ephemerally as needed without explicitly starting a server process.
-
-
-The `prefect profile` CLI commands enable you to create, review, and manage profiles:
-
-| Command | Description |
-| --- | --- |
-| `create` | Create a new profile; use the `--from` flag to copy settings from another profile. |
-| `delete` | Delete the given profile. |
-| `inspect` | Display settings from a given profile; defaults to active. |
-| `ls` | List all profile names. |
-| `rename` | Change the name of a profile. |
-| `use` | Switch the active profile. |
-| `populate-defaults` | Populate your `profiles.toml` file with opinionated stock profiles. |
-
-... or you may edit your `profiles.toml` file directly:
-```bash
-vim ~/.prefect/profiles.toml
-```
-
-### Configure settings for the active profile
-
-The `prefect config` CLI commands enable you to manage the settings within the currently active profile.
-
-| Command | Description |
-| --- | --- |
-| set | Change the value for a setting. |
-| unset | Restore the default value for a setting. |
-| view | Display the current settings. |
-
-For example, the following CLI commands set configuration in the `ephemeral` profile and then create a new
-profile with new settings:
-
-```bash
-prefect profile use ephemeral
-prefect config set PREFECT_API_URL=http://127.0.0.1:4200/api
-
-prefect profile create new-profile --from ephemeral
-prefect profile use new-profile
-prefect config set PREFECT_RESULTS_PERSIST_BY_DEFAULT=true PREFECT_LOGGING_LEVEL="ERROR"
-
-prefect profile inspect
-prefect config unset PREFECT_LOGGING_LEVEL -y
-```
-
-### Use environment variables
-
-All settings can be overridden by setting the environment variable directly.
-
-#### Override a setting for a single command
-For example, we can temporarily set the logging level through an environment variable so that it
-only lasts for the duration of the command:
-
-```bash
-PREFECT_LOGGING_LEVEL="CRITICAL" prefect config view --show-sources
-```
-
-#### Override settings in a `.env` file
-
-You can also set environment variables in a `.env` file, which will be automatically applied when using `prefect` in that directory.
-
-```bash
-echo 'PREFECT_LOGGING_LEVEL="CRITICAL"' > .env
-prefect config view --show-sources
-```
-
-
-**Environment variables always take precedence**
-
-Environment variables always take precedence over values stored within a profile.
-This allows you to configure certain runtime behavior for your workflows by setting the appropriate
-environment variable on the job or process executing the workflow.
-
-
-## Commonly configured settings
-
-This section describes some commonly configured settings.
-
-### Prefect Cloud
-
-- **`PREFECT_API_KEY`**: this setting specifies the
-[API key](/3.0/manage/cloud/manage-users/api-keys/) used to authenticate with Prefect Cloud.
-- **`PREFECT_API_URL`**: this setting specifies the API endpoint of your
-Prefect Cloud workspace or a self-hosted Prefect server instance.
-
-
-**Use `prefect cloud login` to set these values for Prefect Cloud**
-
-To set the `PREFECT_API_URL` and `PREFECT_API_KEY` for your active profile, run `prefect cloud login`.
-Read more about [managing API keys](/3.0/manage/cloud/manage-users/api-keys/).
-
-
-### Prefect server
-
-- **`PREFECT_HOME`**: the `PREFECT_HOME` value specifies the local Prefect directory for configuration files,
-profiles, and the location of the default Prefect SQLite database.
-- **`PREFECT_API_DATABASE_CONNECTION_URL`**: the database connection URL for a self-hosted Prefect server instance.
-Must be provided in a SQLAlchemy-compatible format. Prefect currently supports SQLite and Postgres.
-
-## Security settings
-
-### Host the UI behind a reverse proxy
-
-When using a reverse proxy (such as [Nginx](https://nginx.org) or [Traefik](https://traefik.io)) to proxy traffic to a
- hosted Prefect UI instance, you must also configure the self-hosted Prefect server instance to connect to the API.
-The PREFECT_UI_API_URL should be set to the external proxy URL.
-
-For example, if your external URL is `https://prefect-server.example.com` then set
-`PREFECT_UI_API_URL=https://prefect-server.example.com/api` for the Prefect server process.
-You can also set `PREFECT_API_URL` to the API URL.
-
-This setting is a fallback if `PREFECT_UI_API_URL` is not set.
-
-### CSRF protection settings
-
-If using self-hosted Prefect server, you can configure CSRF protection settings.
-
-- **`PREFECT_SERVER_CSRF_PROTECTION_ENABLED`**: activates CSRF protection on the server,
-requiring valid CSRF tokens for applicable requests. Recommended for production to prevent CSRF attacks.
-Defaults to `False`.
-- **`PREFECT_SERVER_CSRF_TOKEN_EXPIRATION`**: sets the expiration duration for server-issued CSRF tokens,
-influencing how often tokens need to be refreshed. The default is 1 hour.
-- **`PREFECT_CLIENT_CSRF_SUPPORT_ENABLED`**: enables or disables CSRF token handling in the Prefect client.
-When enabled, the client manages CSRF tokens for state-changing API requests. Defaults to `True`.
-
-By default clients expect that CSRF protection is enabled on the server. If you are running a server without CSRF protection,
-you can disable CSRF support in the client.
diff --git a/docs/conftest.py b/docs/conftest.py
index 4db4c192cdac..e1b8ee3e771b 100644
--- a/docs/conftest.py
+++ b/docs/conftest.py
@@ -7,17 +7,17 @@
from prefect.server.database.orm_models import Mapped, Run, mapped_column, sa
SKIP_FILES = {
- "docs/3.0/deploy/index.mdx": "Needs database fixtures",
- "docs/3.0/deploy/run-flows-in-local-processes.mdx": "Needs blocks setup",
- "docs/3.0/develop/blocks.mdx": "Block subclasses defined in docs cannot be properly registered due to test environment limitations",
- "docs/3.0/develop/manage-states.mdx": "Needs some extra import help",
- "docs/3.0/develop/results.mdx": "Needs block cleanup handling",
- "docs/3.0/develop/task-caching.mdx": "Tasks defined in docs cannot be properly inspected due to test environment limitations",
- "docs/3.0/develop/task-run-limits.mdx": "Await outside of async function",
- "docs/3.0/develop/task-runners.mdx": "Tasks defined in docs cannot be properly inspected due to test environment limitations",
- "docs/3.0/develop/write-tasks.mdx": "Tasks defined in docs cannot be properly inspected due to test environment limitations",
- "docs/3.0/manage/interact-with-api.mdx": "Async function outside of async context",
- "docs/3.0/resources/big-data.mdx": "Needs block cleanup handling",
+ "docs/v3/deploy/index.mdx": "Needs database fixtures",
+ "docs/v3/deploy/run-flows-in-local-processes.mdx": "Needs blocks setup",
+ "docs/v3/develop/blocks.mdx": "Block subclasses defined in docs cannot be properly registered due to test environment limitations",
+ "docs/v3/develop/manage-states.mdx": "Needs some extra import help",
+ "docs/v3/develop/results.mdx": "Needs block cleanup handling",
+ "docs/v3/develop/task-caching.mdx": "Tasks defined in docs cannot be properly inspected due to test environment limitations",
+ "docs/v3/develop/task-run-limits.mdx": "Await outside of async function",
+ "docs/v3/develop/task-runners.mdx": "Tasks defined in docs cannot be properly inspected due to test environment limitations",
+ "docs/v3/develop/write-tasks.mdx": "Tasks defined in docs cannot be properly inspected due to test environment limitations",
+ "docs/v3/manage/interact-with-api.mdx": "Async function outside of async context",
+ "docs/v3/resources/big-data.mdx": "Needs block cleanup handling",
"docs/contribute/dev-contribute.mdx": "SQLAlchemy model modifications can't be safely tested without affecting the global database schema",
"docs/integrations/prefect-azure/index.mdx": "Makes live network calls which should be mocked",
"docs/integrations/prefect-bitbucket/index.mdx": "Needs block cleanup handling",
@@ -35,18 +35,18 @@
"docs/integrations/prefect-sqlalchemy/index.mdx": "Needs block cleanup handling",
"docs/integrations/use-integrations.mdx": "Pydantic failures",
# --- Below this line are files that haven't been debugged yet. ---
- "docs/3.0/resources/upgrade-agents-to-workers.mdx": "Needs Debugging",
- "docs/3.0/resources/upgrade-to-prefect-3.mdx": "Needs Debugging",
+ "docs/v3/resources/upgrade-agents-to-workers.mdx": "Needs Debugging",
+ "docs/v3/resources/upgrade-to-prefect-3.mdx": "Needs Debugging",
"docs/contribute/develop-a-new-worker-type.mdx": "Needs Debugging",
"docs/integrations/prefect-aws/index.mdx": "Needs Debugging",
"docs/integrations/prefect-shell/index.mdx": "Needs Debugging",
# --- Below this line are files that pass locally but fail in CI ---
- "docs/3.0/api-ref/rest-api/index.mdx": "Needs Debugging in CI",
- "docs/3.0/automate/add-schedules.mdx": "Needs Debugging in CI",
- "docs/3.0/develop/deferred-tasks.mdx": "Needs Debugging in CI",
- "docs/3.0/develop/logging.mdx": "Needs Debugging in CI",
- "docs/3.0/develop/variables.mdx": "Needs Debugging in CI",
- "docs/3.0/develop/write-flows.mdx": "Needs Debugging in CI",
+ "docs/v3/api-ref/rest-api/index.mdx": "Needs Debugging in CI",
+ "docs/v3/automate/add-schedules.mdx": "Needs Debugging in CI",
+ "docs/v3/develop/deferred-tasks.mdx": "Needs Debugging in CI",
+ "docs/v3/develop/logging.mdx": "Needs Debugging in CI",
+ "docs/v3/develop/variables.mdx": "Needs Debugging in CI",
+ "docs/v3/develop/write-flows.mdx": "Needs Debugging in CI",
}
diff --git a/docs/contribute/dev-contribute.mdx b/docs/contribute/dev-contribute.mdx
index 4c5dd7398b85..d4219ea703f8 100644
--- a/docs/contribute/dev-contribute.mdx
+++ b/docs/contribute/dev-contribute.mdx
@@ -131,7 +131,7 @@ Next, generate new migration files.
Generate a new migration file for each database type.
Migrations are generated for whichever database type `PREFECT_API_DATABASE_CONNECTION_URL` is set to.
-See [how to set the database connection URL](/3.0/manage/self-host#database-settings) for each database type.
+See [how to set the database connection URL](/v3/manage/self-host#database-settings) for each database type.
To generate a new migration file, run:
diff --git a/docs/contribute/develop-a-new-worker-type.mdx b/docs/contribute/develop-a-new-worker-type.mdx
index fff71115e80d..8e4726afb685 100644
--- a/docs/contribute/develop-a-new-worker-type.mdx
+++ b/docs/contribute/develop-a-new-worker-type.mdx
@@ -9,7 +9,7 @@ Prefect workers set up execution infrastructure and start flow runs.
**Advanced topic**
Extending the Prefect framework by developing a worker requires deep knowledge of
-Prefect. For standard needs, we recommend using one of the [available worker types](/3.0/deploy/infrastructure-concepts/workers/#worker-types).
+Prefect. For standard needs, we recommend using one of the [available worker types](/v3/deploy/infrastructure-concepts/workers/#worker-types).
This guide walks you through creating a custom worker that can run your flows on your chosen infrastructure.
@@ -23,7 +23,7 @@ The worker uses this configuration to create the execution environment and start
**How are the configuration values populated?**
-The work pool that a worker polls for flow runs has a [base job template](/3.0/deploy/infrastructure-concepts/work-pools/#base-job-template)
+The work pool that a worker polls for flow runs has a [base job template](/v3/deploy/infrastructure-concepts/work-pools/#base-job-template)
associated with it.
The template is the contract for how configuration values populate for each flow run.
diff --git a/docs/3.0/img/guides/aci-worker-acridentity.png b/docs/images/integrations/aci-worker-acridentity.png
similarity index 100%
rename from docs/3.0/img/guides/aci-worker-acridentity.png
rename to docs/images/integrations/aci-worker-acridentity.png
diff --git a/docs/3.0/img/guides/aci-worker-identities.png b/docs/images/integrations/aci-worker-identities.png
similarity index 100%
rename from docs/3.0/img/guides/aci-worker-identities.png
rename to docs/images/integrations/aci-worker-identities.png
diff --git a/docs/3.0/img/guides/aci-worker-subscription.png b/docs/images/integrations/aci-worker-subscription.png
similarity index 100%
rename from docs/3.0/img/guides/aci-worker-subscription.png
rename to docs/images/integrations/aci-worker-subscription.png
diff --git a/docs/images/prefect-dbt-summary-artifact.png b/docs/images/prefect-dbt-summary-artifact.png
new file mode 100644
index 000000000000..bcab86389f39
Binary files /dev/null and b/docs/images/prefect-dbt-summary-artifact.png differ
diff --git a/docs/integrations/prefect-aws/ecs_guide.mdx b/docs/integrations/prefect-aws/ecs_guide.mdx
index 7644b6b90c86..1fdad1539c97 100644
--- a/docs/integrations/prefect-aws/ecs_guide.mdx
+++ b/docs/integrations/prefect-aws/ecs_guide.mdx
@@ -13,55 +13,48 @@ ECS (Elastic Container Service) tasks are a good option for executing Prefect fl
## ECS flow run execution
-Prefect enables remote flow execution via workers and work pools. To learn more about these concepts please see our [deployment docs](/latest/deploy/infrastructure-concepts/work-pools/).
+Prefect enables remote flow execution via workers and work pools. To learn more about these concepts, please see our [deployment docs](/v3/deploy/infrastructure-concepts/work-pools/).
For details on how workers and work pools are implemented for ECS, see the diagram below.
```mermaid
%%{
init: {
- 'theme': 'base',
+ 'theme': 'neutral',
'themeVariables': {
- 'primaryColor': '#2D6DF6',
- 'primaryTextColor': '#fff',
- 'lineColor': '#FE5A14',
- 'secondaryColor': '#E04BF0',
- 'tertiaryColor': '#fff'
+ 'margin': '10px'
}
}
}%%
-graph TB
-
- subgraph ecs_cluster[ECS cluster]
- subgraph ecs_service[ECS service]
- td_worker[Worker task definition] --> |defines| prefect_worker((Prefect worker))
- end
- prefect_worker -->|kicks off| ecs_task
- fr_task_definition[Flow run task definition]
+flowchart TB
+ subgraph ecs_cluster[ECS Cluster]
+ subgraph ecs_service[ECS Service]
+ td_worker[Worker Task Definition] --> |defines| prefect_worker[Prefect Worker]
+ end
- subgraph ecs_task["ECS task execution"]
- style ecs_task text-align:center,display:flex
+ prefect_worker -->|kicks off| ecs_task
+
+ fr_task_definition[Flow Run Task Definition]
+ subgraph ecs_task[ECS Task Execution]
+ flow_run((Flow Run))
+ end
- flow_run((Flow run))
+ fr_task_definition -->|defines| ecs_task
+ end
+ subgraph prefect_cloud[Prefect Cloud]
+ work_pool[ECS Work Pool]
end
- fr_task_definition -->|defines| ecs_task
- end
- subgraph prefect_cloud[Prefect Cloud]
- subgraph prefect_workpool[ECS work pool]
- workqueue[Work queue]
+ subgraph github[ECR]
+ flow_code["Flow Code"]
end
- end
-
- subgraph github["ECR"]
- flow_code{{"Flow code"}}
- end
- flow_code --> |pulls| ecs_task
- prefect_worker -->|polls| workqueue
- prefect_workpool -->|configures| fr_task_definition
+
+ flow_code --> |pulls| ecs_task
+ prefect_worker -->|polls| work_pool
+ work_pool -->|configures| fr_task_definition
```
## ECS and Prefect
@@ -69,7 +62,7 @@ graph TB
**ECS tasks != Prefect tasks**
-An ECS task is **not** the same thing as a [Prefect task](/3.0/develop/write-tasks).
+An ECS task is **not** the same thing as a [Prefect task](/v3/develop/write-tasks).
ECS tasks are groupings of containers that run within an ECS Cluster. An ECS task's behavior is determined by its task definition.
@@ -104,7 +97,7 @@ If you prefer infrastructure as code check out this [Terraform module](https://r
## Step 1: Set up an ECS work pool
-Before setting up the worker, create a [work pool](/latest/deploy/infrastructure-concepts/work-pools/) of type ECS for the worker to pull work from. If doing so from the CLI, be sure to authenticate with Prefect Cloud or run a local Prefect server instance.
+Before setting up the worker, create a [work pool](/v3/deploy/infrastructure-concepts/work-pools/) of type ECS for the worker to pull work from. If doing so from the CLI, be sure to authenticate with Prefect Cloud or run a local Prefect server instance.
Create a work pool from the CLI:
@@ -217,7 +210,7 @@ Next, create an ECS task definition that specifies the Docker image for the Pref
- Use `prefect config view` to view the `PREFECT_API_URL` for your current Prefect profile. Use this to replace ``.
-- For the `PREFECT_API_KEY`, if you are on a paid plan you can create a [service account](https://docs.prefect.io/latest/cloud/manage-users/service-accounts/) for the worker. If your are on a free plan, you can pass a user's API key.
+- For the `PREFECT_API_KEY`, if you are on a paid plan you can create a [service account](/v3/manage/cloud/manage-users/service-accounts) for the worker. If you are on a free plan, you can pass a user's API key.
- Replace both instances of `` with the ARN of the IAM role you created in Step 2. You can grab this by running:
```
@@ -358,4 +351,4 @@ prefect deploy my_flow.py:my_ecs_deployment
- Do your flow runs require higher `CPU`?
- Would an EC2 `Launch Type` speed up your flow run execution?
- These infrastructure configuration values can be set on your ECS work pool or they can be overridden on the deployment level through [job_variables](/latest/deploy/infrastructure-concepts/customize/) if desired.
+ These infrastructure configuration values can be set on your ECS work pool or they can be overridden on the deployment level through [job_variables](/v3/deploy/infrastructure-concepts/customize/) if desired.
diff --git a/docs/integrations/prefect-aws/index.mdx b/docs/integrations/prefect-aws/index.mdx
index cf34281718d9..5cc29eaca9df 100644
--- a/docs/integrations/prefect-aws/index.mdx
+++ b/docs/integrations/prefect-aws/index.mdx
@@ -42,7 +42,7 @@ Run flows on [AWS Elastic Container Service (ECS)](https://aws.amazon.com/ecs/)
See the [ECS work pool docs](/integrations/prefect-aws/ecs_guide) for a walkthrough of using ECS in a hybrid work pool.
-If you're using Prefect Cloud, [ECS push work pools](/latest/deploy/infrastructure-examples/serverless) provide all the benefits of ECS with a quick setup and no worker needed.
+If you're using Prefect Cloud, [ECS push work pools](/v3/deploy/infrastructure-examples/serverless) provide all the benefits of ECS with a quick setup and no worker needed.
In the examples below, you create blocks with Python code.
Alternatively, each block can be created through the Prefect UI.
@@ -74,7 +74,7 @@ Prefect is using the Boto3 library under the hood.
To find credentials for authentication, any data not provided to the block are sourced at runtime in the order shown in the [Boto3 docs](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html#configuring-credentials).
Prefect creates the session object using the values in the block and then, any missing values follow the sequence in the Boto3 docs.
-See an example of using the `AwsCredentials` block with [AWS Secrets Manager](#aws-secrets-manager) with third-party services without storing credentials in the block itself in the [docs](/latest/resources/secrets).
+See the [docs](/v3/resources/secrets) for an example of using the `AwsCredentials` block with [AWS Secrets Manager](#aws-secrets-manager) and third-party services without storing credentials in the block itself.
Here's how to load the saved credentials:
diff --git a/docs/integrations/prefect-azure/aci_worker.mdx b/docs/integrations/prefect-azure/aci_worker.mdx
index 79144e088f74..a98fdf6bc929 100644
--- a/docs/integrations/prefect-azure/aci_worker.mdx
+++ b/docs/integrations/prefect-azure/aci_worker.mdx
@@ -146,7 +146,7 @@ This will be your `IDENTITY_ID`. You can get it from your terminal by running `e
["/subscriptions//resourcegroups//providers/Microsoft.ManagedIdentity/userAssignedIdentities/"]
```
-![Configuring an ACI work pool's identities.](/latest/img/guides/aci-worker-identities.png)
+![Configuring an ACI work pool's identities.](/images/integrations/aci-worker-identities.png)
### ACRManagedIdentity
@@ -161,7 +161,7 @@ ACRManagedIdentity is required for your flow code containers to be pulled from A
.azurecr.io
```
-![Configuring an ACI work pool's ACR Managed Identity.](/latest/img/guides/aci-worker-acridentity.png)
+![Configuring an ACI work pool's ACR Managed Identity.](/images/integrations/aci-worker-acridentity.png)
### Subscription ID and resource group name
@@ -171,7 +171,7 @@ Both the subscription ID and resource group name can be found in the `RG_SCOPE`
/subscriptions//resourceGroups/
```
-![Configuring an ACI work pool.](/latest/img/guides/aci-worker-subscription.png)
+![Configuring an ACI work pool.](/images/integrations/aci-worker-subscription.png)
Then click Save.
diff --git a/docs/integrations/prefect-azure/index.mdx b/docs/integrations/prefect-azure/index.mdx
index a4dd244638b0..8e0c35349546 100644
--- a/docs/integrations/prefect-azure/index.mdx
+++ b/docs/integrations/prefect-azure/index.mdx
@@ -84,7 +84,7 @@ Run flows on [Azure Container Instances (ACI)](https://learn.microsoft.com/en-us
See the [Azure Container Instances Worker Guide](/integrations/prefect-azure/aci_worker/) for a walkthrough of using ACI in a hybrid work pool.
-If you're using Prefect Cloud, [ACI push work pools](/latest/deploy/infrastructure-examples/serverless/#__tabbed_1_2) provide all the benefits of ACI with a quick setup and no worker needed.
+If you're using Prefect Cloud, [ACI push work pools](/v3/deploy/infrastructure-examples/serverless/#__tabbed_1_2) provide all the benefits of ACI with a quick setup and no worker needed.
## Resources
diff --git a/docs/integrations/prefect-bitbucket/index.mdx b/docs/integrations/prefect-bitbucket/index.mdx
index 8e2f68b03f76..aed2975cf005 100644
--- a/docs/integrations/prefect-bitbucket/index.mdx
+++ b/docs/integrations/prefect-bitbucket/index.mdx
@@ -45,7 +45,7 @@ To create a deployment and run a deployment where the flow code is stored in a p
A deployment can use flow code stored in a Bitbucket repository without using this library in either of the following cases:
- The repository is public
-- The deployment uses a [Secret block](/latest/resources/secrets) to store the token
+- The deployment uses a [Secret block](/v3/resources/secrets) to store the token
Create a Bitbucket Credentials block:
diff --git a/docs/integrations/prefect-dask/index.mdx b/docs/integrations/prefect-dask/index.mdx
index c736325b18c3..a414a9378e0a 100644
--- a/docs/integrations/prefect-dask/index.mdx
+++ b/docs/integrations/prefect-dask/index.mdx
@@ -134,7 +134,7 @@ Using the `DaskTaskRunner` reduced the runtime to **5.7** seconds!
## Run tasks on Dask
-The `DaskTaskRunner` is a [task runner](/latest/develop/task-runners) that submits tasks to the [`dask.distributed`](http://distributed.dask.org/) scheduler.
+The `DaskTaskRunner` is a [task runner](/v3/develop/task-runners) that submits tasks to the [`dask.distributed`](http://distributed.dask.org/) scheduler.
By default, when the `DaskTaskRunner` is specified for a flow run, a temporary Dask cluster is created and used for the duration of the flow run.
If you already have a Dask cluster running, either cloud-hosted or local, you can provide the connection URL with the `address` kwarg.
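+
+For example, here is a minimal sketch of connecting to an existing cluster (the scheduler address below is a placeholder):
+
+```python
+from prefect import flow
+from prefect_dask import DaskTaskRunner
+
+
+# Reuse an existing Dask cluster instead of creating a temporary one
+@flow(task_runner=DaskTaskRunner(address="tcp://127.0.0.1:8786"))
+def my_dask_flow():
+    ...
+```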
diff --git a/docs/integrations/prefect-dbt/index.mdx b/docs/integrations/prefect-dbt/index.mdx
index e72f1b7f505e..f74ed43d7784 100644
--- a/docs/integrations/prefect-dbt/index.mdx
+++ b/docs/integrations/prefect-dbt/index.mdx
@@ -2,17 +2,17 @@
title: prefect-dbt
---
-With `prefect-dbt`, you can trigger and observe dbt Cloud jobs, execute dbt Core CLI commands, and incorporate other tools, such as [Snowflake](integrations/prefect-snowflake/index), into your dbt runs.
+With `prefect-dbt`, you can trigger and observe dbt Cloud jobs, execute dbt Core CLI commands, and incorporate other tools, such as [Snowflake](/integrations/prefect-snowflake/index), into your dbt runs.
Prefect provides a global view of the state of your workflows and allows you to take action based on state changes.
-Prefect integrations may provide pre-built [blocks](/latest/develop/blocks), [flows](/latest/develop/flows), or [tasks](/latest/develop/tasks) for interacting with external systems.
+Prefect integrations may provide pre-built [blocks](/v3/develop/blocks), [flows](/v3/develop/write-flows), or [tasks](/v3/develop/write-tasks) for interacting with external systems.
Block types in this library allow you to do things such as run a dbt Cloud job or execute a dbt Core command.
## Getting started
### Prerequisites
-- A [dbt Cloud account](https://cloud.getdbt.com/signup) if using dbt Cloud.
+- A [dbt Cloud account](https://cloud.getdbt.com/) if using dbt Cloud.
### Install `prefect-dbt`
@@ -31,12 +31,6 @@ pip install -U "prefect[dbt]"
If necessary, see [additional installation options for dbt Core with BigQuery, Snowflake, and Postgres](#additional-installation-options).
-To install with all additional capabilities, use the following command:
-
-
-```bash
-pip install -U "prefect-dbt[all_extras]"
-```
### Register newly installed blocks types
@@ -48,42 +42,20 @@ prefect block register -m prefect_dbt
## dbt Cloud
-If you have an existing dbt Cloud job, use the pre-built flow `run_dbt_cloud_job` to trigger a job run and wait until the job run is finished.
-
-If some nodes fail, `run_dbt_cloud_job` efficiently retries the unsuccessful nodes.
-
-Prior to running this flow, [save your dbt Cloud credentials to a DbtCloudCredentials block](#save-dbt-cloud-credentials-to-a-block):
-
-```python
-from prefect import flow
-from prefect_dbt.cloud import DbtCloudJob
-from prefect_dbt.cloud.jobs import run_dbt_cloud_job
-
+If you have an existing dbt Cloud job, use the pre-built flow `run_dbt_cloud_job` to trigger a job run and wait until the job run is finished. If some nodes fail, `run_dbt_cloud_job` can efficiently retry the unsuccessful nodes. Before running this flow, save your dbt Cloud credentials to a DbtCloudCredentials block and create a dbt Cloud Job block, as described in the following sections.
-@flow
-def run_dbt_job_flow():
- result = run_dbt_cloud_job(
- dbt_cloud_job=DbtCloudJob.load("my-block-name"),
- targeted_retries=5,
- )
- return result
-
-run_dbt_job_flow()
-```
### Save dbt Cloud credentials to a block
-Blocks can be [created through code](/latest/develop/blocks) or through the UI.
+Blocks can be [created through code](/v3/develop/blocks) or through the UI.
To create a dbt Cloud Credentials block:
-1. Go to your [dbt Cloud profile](https://cloud.getdbt.com/settings/profile).
-2. Log in to your dbt Cloud account.
-3. Scroll to **API** or click **API Access** on the sidebar.
-4. Copy the API Key.
-5. Click **Projects** on the sidebar.
-6. Copy the account ID from the URL: `https://cloud.getdbt.com/settings/accounts/`.
-7. Create and run the following script, replacing the placeholders:
+1. Log in to your [dbt Cloud account](https://cloud.getdbt.com/settings/profile).
+2. Click **API Tokens** on the sidebar.
+3. Copy a Service Token.
+4. Copy the account ID from the URL: `https://cloud.getdbt.com/settings/accounts/`.
+5. Create and run the following script, replacing the placeholders:
```python
from prefect_dbt.cloud import DbtCloudCredentials
@@ -95,13 +67,12 @@ DbtCloudCredentials(
).save("CREDENTIALS-BLOCK-NAME-PLACEHOLDER")
```
-Then, create a dbt Cloud job block:
+### Create a dbt Cloud job block
-1. Navigate to your [dbt home page](https://cloud.getdbt.com/).
-2. On the top nav bar, click on **Deploy** -> **Jobs**.
-3. Select a job.
-4. Copy the job ID from the URL: `https://cloud.getdbt.com/deploy//projects//jobs/`
-5. Create and run the following script, replacing the placeholders.
+1. In dbt Cloud, click on **Deploy** -> **Jobs**.
+2. Select a job.
+3. Copy the job ID from the URL: `https://cloud.getdbt.com/deploy//projects//jobs/`
+4. Create and run the following script, replacing the placeholders.
```python
from prefect_dbt.cloud import DbtCloudCredentials, DbtCloudJob
@@ -113,20 +84,30 @@ dbt_cloud_job = DbtCloudJob(
job_id="JOB-ID-PLACEHOLDER"
).save("JOB-BLOCK-NAME-PLACEHOLDER")
```
-
-Load the saved block, which can access your credentials:
+### Run a dbt Cloud job and wait for completion
```python
+from prefect import flow
from prefect_dbt.cloud import DbtCloudJob
+from prefect_dbt.cloud.jobs import run_dbt_cloud_job
+import asyncio
+
+
+@flow
+async def run_dbt_job_flow():
+    result = await run_dbt_cloud_job(
+        dbt_cloud_job=await DbtCloudJob.load("JOB-BLOCK-NAME-PLACEHOLDER"),
+        targeted_retries=0,
+    )
+    return result
+
-DbtCloudJob.load("JOB-BLOCK-NAME-PLACEHOLDER")
+if __name__ == "__main__":
+ asyncio.run(run_dbt_job_flow())
```
## dbt Core
-Prefect-dbt supports execution of dbt Core CLI commands.
-If you don't have a `DbtCoreOperation` block saved, create one and set the commands that you want to run.
+`prefect-dbt` supports two ways to run dbt Core commands.
+A `DbtCoreOperation` block will run the commands as shell commands, while other tasks use dbt's [Programmatic Invocation](#programmatic-invocation).
Optionally, specify the `project_dir`.
If `profiles_dir` is not set, the `DBT_PROFILES_DIR` environment variable will be used.
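+
+For example, here is a minimal sketch of running dbt Core commands with a `DbtCoreOperation` block (the directory paths are placeholders):
+
+```python
+from prefect import flow
+from prefect_dbt.cli.commands import DbtCoreOperation
+
+
+@flow
+def trigger_dbt_core_flow():
+    # Each command runs as a shell command in the given project directory
+    result = DbtCoreOperation(
+        commands=["dbt debug", "dbt run"],
+        project_dir="PROJECT-DIR-PLACEHOLDER",
+        profiles_dir="PROFILES-DIR-PLACEHOLDER",
+    ).run()
+    return result
+```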
@@ -190,43 +171,9 @@ profile:
```
-### Programmatic Invocation
-
-`prefect-dbt` has some pre-built tasks that use dbt's [programmatic invocation](https://docs.getdbt.com/reference/programmatic-invocations).
-For example:
-
-```python
-from prefect import flow
-from prefect_dbt.cli.tasks import from prefect import flow
-from prefect_dbt.cli.commands import trigger_dbt_cli_command, dbt_build_task
-
-
-@flow
-def dbt_build_flow():
- trigger_dbt_cli_command(
- command="dbt deps", project_dir="/Users/test/my_dbt_project_dir",
- )
- dbt_build_task(
- project_dir="/Users/test/my_dbt_project_dir",
- create_summary_artifact: bool = True,
- summary_artifact_key: str = "dbt-build-task-summary",
- extra_command_args=["--model", "foo_model"]
- )
-
-
-if __name__ == "__main__":
- dbt_build_flow()
-```
-
-See the [SDK docs](https://prefect-python-sdk-docs.netlify.app/prefect_dbt/) for other pre-built tasks.
-
-### Create a summary artifact
+### Create a new `profiles.yml` file with blocks
-These pre-built tasks can also create artifacts. These artifacts have extra information about dbt Core runs, such as messages and compiled code for nodes that fail or have errors.
-
-### Create a new profile with blocks
-
-Use a DbtCliProfile block to create `profiles.yml`.
+If you don't have a `profiles.yml` file, you can use a DbtCliProfile block to create one.
Then, specify `profiles_dir` where `profiles.yml` will be written.
Here's example code with placeholders:
@@ -266,7 +213,44 @@ Visit the SDK reference in the side navigation to see other built-in `TargetConf
If the desired service profile is not available, you can build one from the generic `TargetConfigs` class.
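+
+For example, here is a minimal sketch using the generic class (the values are placeholders):
+
+```python
+from prefect_dbt.cli.configs import TargetConfigs
+
+# Generic target configs for a service without a dedicated class
+target_configs = TargetConfigs(
+    type="postgres",
+    schema="public",
+    threads=4,
+)
+```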
-#### BigQuery profile example
+### Programmatic Invocation
+
+`prefect-dbt` has some pre-built tasks that use dbt's [programmatic invocation](https://docs.getdbt.com/reference/programmatic-invocations).
+
+For example:
+
+```python
+from prefect import flow
+from prefect_dbt.cli.commands import trigger_dbt_cli_command, dbt_build_task
+
+
+@flow
+def dbt_build_flow():
+ trigger_dbt_cli_command(
+ command="dbt deps", project_dir="/Users/test/my_dbt_project_dir",
+ )
+ dbt_build_task(
+        project_dir="/Users/test/my_dbt_project_dir",
+        create_summary_artifact=True,
+        summary_artifact_key="dbt-build-task-summary",
+ extra_command_args=["--select", "foo_model"]
+ )
+
+
+if __name__ == "__main__":
+ dbt_build_flow()
+```
+
+See the [SDK docs](https://prefect-python-sdk-docs.netlify.app/prefect_dbt/) for other pre-built tasks.
+
+#### Create a summary artifact
+
+These pre-built tasks can also create artifacts. These artifacts have extra information about dbt Core runs, such as messages and compiled code for nodes that fail or have errors.
+
+![prefect-dbt Summary Artifact](/images/prefect-dbt-summary-artifact.png)
+
+### BigQuery CLI profile block example
To create dbt Core target config and profile blocks for BigQuery:
1. Save and load a `GcpCredentials` block.
@@ -354,3 +338,9 @@ pip install "prefect-dbt[bigquery]"
```bash
pip install "prefect-dbt[postgres]"
```
+
+Or, install all of the extras:
+
+```bash
+pip install -U "prefect-dbt[all_extras]"
+```
\ No newline at end of file
diff --git a/docs/integrations/prefect-docker/index.mdx b/docs/integrations/prefect-docker/index.mdx
index 33738d64ec4b..5491a4308c15 100644
--- a/docs/integrations/prefect-docker/index.mdx
+++ b/docs/integrations/prefect-docker/index.mdx
@@ -25,7 +25,7 @@ Upgrade to the latest versions of `prefect` and `prefect-docker`:
pip install -U "prefect[docker]"
```
### Examples
-See the Prefect [Workers docs](/latest/deploy/infrastructure-examples/docker) to learn how to create and run deployments that use Docker.
+See the Prefect [Workers docs](/v3/deploy/infrastructure-examples/docker) to learn how to create and run deployments that use Docker.
## Resources
diff --git a/docs/integrations/prefect-gcp/gcp-worker-guide.mdx b/docs/integrations/prefect-gcp/gcp-worker-guide.mdx
index 0d6667b7bb11..e4351b6be6c4 100644
--- a/docs/integrations/prefect-gcp/gcp-worker-guide.mdx
+++ b/docs/integrations/prefect-gcp/gcp-worker-guide.mdx
@@ -315,6 +315,6 @@ Congratulations on completing this guide! Looking back on our journey, you have:
4. Deployed a flow
5. Executed a flow
-For next steps, take a look at some of the other [work pools](/latest/deploy/infrastructure-examples/serverless) Prefect has to offer.
+For next steps, take a look at some of the other [work pools](/v3/deploy/infrastructure-examples/serverless) Prefect has to offer.
The world is your oyster 🦪✨.
diff --git a/docs/integrations/prefect-gcp/index.mdx b/docs/integrations/prefect-gcp/index.mdx
index fc938fe0e28c..915923016a0a 100644
--- a/docs/integrations/prefect-gcp/index.mdx
+++ b/docs/integrations/prefect-gcp/index.mdx
@@ -81,7 +81,7 @@ If `service_account_file` is used, the provided path *must be available* in the
Alternatively, GCP can authenticate without storing credentials in a block.
-See the [Third-party Secrets docs](/latest/resources/secrets) for an analogous example that uses AWS Secrets Manager and Snowflake.
+See the [Third-party Secrets docs](/v3/resources/secrets) for an analogous example that uses AWS Secrets Manager and Snowflake.
## Run flows on Google Cloud Run or Vertex AI
@@ -89,7 +89,7 @@ Run flows on [Google Cloud Run](https://cloud.google.com/run) or [Vertex AI](htt
See the [Google Cloud Run Worker Guide](integrations/prefect-gcp/gcp-worker-guide/) for a walkthrough of using Google Cloud Run to run workflows with a hybrid work pool.
-If you're using Prefect Cloud, [Google Cloud Run push work pools](/latest/deploy/infrastructure-examples/serverless) provide all the benefits of Google Cloud Run along with a quick setup and no worker needed.
+If you're using Prefect Cloud, [Google Cloud Run push work pools](/v3/deploy/infrastructure-examples/serverless) provide all the benefits of Google Cloud Run along with a quick setup and no worker needed.
### Use Prefect with Google BigQuery
diff --git a/docs/integrations/prefect-kubernetes/index.mdx b/docs/integrations/prefect-kubernetes/index.mdx
index 135baddb27b2..08f3325da56f 100644
--- a/docs/integrations/prefect-kubernetes/index.mdx
+++ b/docs/integrations/prefect-kubernetes/index.mdx
@@ -4,7 +4,7 @@ title: prefect-kubernetes
`prefect-kubernetes` contains Prefect tasks, flows, and blocks enabling orchestration, observation and management of Kubernetes resources.
-This library is most commonly used for installation with a Kubernetes worker. See the [Prefect docs on deploying with Kubernetes](/3.0/deploy/infrastructure-examples/kubernetes) to learn how to create and run deployments in Kubernetes.
+This library is most commonly used for installation with a Kubernetes worker. See the [Prefect docs on deploying with Kubernetes](/v3/deploy/infrastructure-examples/kubernetes) to learn how to create and run deployments in Kubernetes.
Prefect provides a Helm chart for deploying a worker, a self-hosted Prefect server instance, and other resources to a Kubernetes cluster. See the [Prefect Helm chart](https://github.com/PrefectHQ/prefect-helm) for more information.
diff --git a/docs/integrations/prefect-snowflake/index.mdx b/docs/integrations/prefect-snowflake/index.mdx
index 180575c5cd8d..0abf7fd26f25 100644
--- a/docs/integrations/prefect-snowflake/index.mdx
+++ b/docs/integrations/prefect-snowflake/index.mdx
@@ -10,7 +10,7 @@ The prefect-snowflake collection makes it easy to connect to a Snowflake databas
### Prerequisites
-- [Prefect installed](/latest/get-started/install/).
+- [Prefect installed](/v3/get-started/install/).
### Install prefect-snowflake
diff --git a/docs/integrations/use-integrations.mdx b/docs/integrations/use-integrations.mdx
index 4d8cb4dcbe2e..a21f8f98f119 100644
--- a/docs/integrations/use-integrations.mdx
+++ b/docs/integrations/use-integrations.mdx
@@ -24,7 +24,7 @@ See [the `setup.py`](https://github.com/PrefectHQ/prefect/blob/main/setup.py) fo
## Register blocks from an integration
-Once the packages is installed, [register the blocks](/latest/develop/blocks/#registering-blocks-for-use-in-the-prefect-ui) within the integration to view them in the Prefect Cloud UI:
+Once the package is installed, [register the blocks](/v3/develop/blocks/#registering-blocks-for-use-in-the-prefect-ui) within the integration to view them in the Prefect Cloud UI:
For example, to register the blocks available in `prefect-aws`:
@@ -32,7 +32,7 @@ For example, to register the blocks available in `prefect-aws`:
prefect block register -m prefect_aws
```
-To use a block's `load` method, you must have a block [saved](/latest/develop/blocks/#saving-blocks). [Learn more about blocks](/latest/develop/blocks).
+To use a block's `load` method, you must have a block [saved](/v3/develop/blocks/#saving-blocks). [Learn more about blocks](/v3/develop/blocks).
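+
+For example, here is a minimal sketch of loading a saved block (assuming a `prefect-aws` credentials block named `my-aws-creds` exists):
+
+```python
+from prefect_aws import AwsCredentials
+
+# Load a block document that was previously saved to the server
+aws_credentials = AwsCredentials.load("my-aws-creds")
+```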
## Use tasks and flows from an Integration
diff --git a/docs/mint.json b/docs/mint.json
index 4e78a804ab5c..b20b003993fa 100644
--- a/docs/mint.json
+++ b/docs/mint.json
@@ -52,163 +52,185 @@
{
"group": "Get started",
"pages": [
- "3.0/get-started/index",
- "3.0/resources/whats-new-prefect-3",
- "3.0/get-started/install",
- "3.0/get-started/quickstart"
+ "v3/get-started/index",
+ "v3/resources/whats-new-prefect-3",
+ "v3/get-started/install",
+ "v3/get-started/quickstart"
],
- "version": "3.0"
+ "version": "v3"
+ },
+ {
+ "group": "Tutorials",
+ "pages": [
+ "v3/tutorials/schedule",
+ "v3/tutorials/pipelines",
+ "v3/tutorials/scraping"
+ ],
+ "version": "v3"
},
{
"group": "Get started",
"pages": [
- "2.x/get-started/index"
+ "v2/get-started/index"
],
- "version": "2.x"
+ "version": "v2"
},
{
"group": "Develop",
"pages": [
- "3.0/develop/index",
+ "v3/develop/index",
{
"group": "Write and run workflows",
"pages": [
- "3.0/develop/write-flows",
- "3.0/develop/write-tasks",
- "3.0/develop/deferred-tasks"
+ "v3/develop/write-flows",
+ "v3/develop/write-tasks",
+ "v3/develop/deferred-tasks"
],
- "version": "3.0"
+ "version": "v3"
},
{
"group": "Configure runtime behavior",
"pages": [
- "3.0/develop/logging",
- "3.0/develop/task-caching",
- "3.0/develop/transactions",
- "3.0/develop/pause-resume",
- "3.0/develop/inputs",
- "3.0/develop/runtime-context"
+ "v3/develop/logging",
+ "v3/develop/task-caching",
+ "v3/develop/transactions",
+ "v3/develop/pause-resume",
+ "v3/develop/inputs",
+ "v3/develop/runtime-context"
],
- "version": "3.0"
+ "version": "v3"
},
{
"group": "Manage state and configuration",
"pages": [
- "3.0/develop/manage-states",
- "3.0/develop/results",
- "3.0/develop/artifacts",
- "3.0/develop/variables",
- "3.0/develop/blocks"
+ "v3/develop/manage-states",
+ "v3/develop/results",
+ "v3/develop/artifacts",
+ "v3/develop/variables",
+ "v3/develop/blocks"
],
- "version": "3.0"
+ "version": "v3"
},
{
"group": "Manage concurrency",
"pages": [
- "3.0/develop/task-runners",
- "3.0/develop/task-run-limits",
- "3.0/develop/global-concurrency-limits"
+ "v3/develop/task-runners",
+ "v3/develop/task-run-limits",
+ "v3/develop/global-concurrency-limits"
],
- "version": "3.0"
+ "version": "v3"
+ },
+ {
+ "group": "Manage settings",
+ "pages": [
+ "v3/develop/settings-and-profiles",
+ "v3/develop/settings-ref"
+ ]
},
- "3.0/develop/test-workflows"
+ "v3/develop/test-workflows"
],
- "version": "3.0"
+ "version": "v3"
},
{
"group": "Deploy",
"pages": [
- "3.0/deploy/index",
- "3.0/deploy/run-flows-in-local-processes",
+ "v3/deploy/index",
+ "v3/deploy/run-flows-in-local-processes",
{
"group": "Run flows on dynamic infrastructure",
"pages": [
- "3.0/deploy/infrastructure-concepts/work-pools",
- "3.0/deploy/infrastructure-concepts/workers",
- "3.0/deploy/infrastructure-concepts/store-flow-code",
- "3.0/deploy/infrastructure-concepts/deploy-via-python",
- "3.0/deploy/infrastructure-concepts/prefect-yaml",
- "3.0/deploy/infrastructure-concepts/deploy-ci-cd",
- "3.0/deploy/infrastructure-concepts/customize"
+ "v3/deploy/infrastructure-concepts/work-pools",
+ "v3/deploy/infrastructure-concepts/workers",
+ "v3/deploy/infrastructure-concepts/store-flow-code",
+ "v3/deploy/infrastructure-concepts/deploy-via-python",
+ "v3/deploy/infrastructure-concepts/prefect-yaml",
+ "v3/deploy/infrastructure-concepts/deploy-ci-cd",
+ "v3/deploy/infrastructure-concepts/customize"
+ ],
+ "version": "v3"
+ },
+ {
+ "group": "Dynamic infrastructure examples",
+ "pages": [
+ "v3/deploy/infrastructure-examples/managed",
+ "v3/deploy/infrastructure-examples/serverless",
+ "v3/deploy/infrastructure-examples/docker",
+ "v3/deploy/infrastructure-examples/kubernetes"
],
- "version": "3.0"
+ "version": "v3"
},
{
- "group": "Infrastructure-specific examples",
+ "group": "Static infrastructure examples",
"pages": [
- "3.0/deploy/infrastructure-examples/managed",
- "3.0/deploy/infrastructure-examples/serverless",
- "3.0/deploy/infrastructure-examples/docker",
- "3.0/deploy/infrastructure-examples/kubernetes"
+ "v3/deploy/static-infrastructure-examples/docker"
],
- "version": "3.0"
+ "version": "v3"
}
],
- "version": "3.0"
+ "version": "v3"
},
{
"group": "Automate",
"pages": [
- "3.0/automate/index",
- "3.0/automate/add-schedules",
- "3.0/automate/events/events",
- "3.0/automate/events/automations-triggers",
- "3.0/automate/events/custom-triggers",
- "3.0/automate/events/webhook-triggers",
- "3.0/automate/incidents"
+ "v3/automate/index",
+ "v3/automate/add-schedules",
+ "v3/automate/events/events",
+ "v3/automate/events/automations-triggers",
+ "v3/automate/events/custom-triggers",
+ "v3/automate/events/webhook-triggers",
+ "v3/automate/incidents"
],
- "version": "3.0"
+ "version": "v3"
},
{
"group": "Cloud and Server",
"pages": [
- "3.0/manage/index",
+ "v3/manage/index",
{
"group": "Use Prefect Cloud",
"pages": [
- "3.0/manage/cloud/index",
- "3.0/manage/cloud/connect-to-cloud",
- "3.0/manage/cloud/workspaces",
+ "v3/manage/cloud/index",
+ "v3/manage/cloud/connect-to-cloud",
+ "v3/manage/cloud/workspaces",
{
"group": "Manage accounts",
"pages": [
- "3.0/manage/cloud/manage-users/index",
- "3.0/manage/cloud/manage-users/service-accounts",
- "3.0/manage/cloud/manage-users/manage-teams",
- "3.0/manage/cloud/manage-users/manage-roles",
- "3.0/manage/cloud/manage-users/api-keys",
- "3.0/manage/cloud/manage-users/configure-sso",
- "3.0/manage/cloud/manage-users/audit-logs",
- "3.0/manage/cloud/manage-users/object-access-control-lists",
- "3.0/manage/cloud/manage-users/secure-access-by-ip-address"
+ "v3/manage/cloud/manage-users/index",
+ "v3/manage/cloud/manage-users/service-accounts",
+ "v3/manage/cloud/manage-users/manage-teams",
+ "v3/manage/cloud/manage-users/manage-roles",
+ "v3/manage/cloud/manage-users/api-keys",
+ "v3/manage/cloud/manage-users/configure-sso",
+ "v3/manage/cloud/manage-users/audit-logs",
+ "v3/manage/cloud/manage-users/object-access-control-lists",
+ "v3/manage/cloud/manage-users/secure-access-by-ip-address"
],
- "version": "3.0"
+ "version": "v3"
},
- "3.0/manage/cloud/rate-limits",
- "3.0/manage/cloud/terraform-provider",
- "3.0/manage/cloud/troubleshoot-cloud"
+ "v3/manage/cloud/rate-limits",
+ "v3/manage/cloud/terraform-provider",
+ "v3/manage/cloud/troubleshoot-cloud"
],
- "version": "3.0"
+ "version": "v3"
},
- "3.0/manage/self-host",
- "3.0/manage/settings-and-profiles",
- "3.0/manage/interact-with-api"
+ "v3/manage/self-host",
+ "v3/manage/interact-with-api"
],
- "version": "3.0"
+ "version": "v3"
},
{
"group": "Resources",
"pages": [
- "3.0/resources/upgrade-to-prefect-3",
- "3.0/resources/upgrade-agents-to-workers",
- "3.0/resources/cancel",
- "3.0/resources/visualize-flow-structure",
- "3.0/resources/daemonize-processes",
- "3.0/resources/big-data",
- "3.0/resources/cli-shell",
- "3.0/resources/secrets"
+ "v3/resources/upgrade-to-prefect-3",
+ "v3/resources/upgrade-agents-to-workers",
+ "v3/resources/cancel",
+ "v3/resources/visualize-flow-structure",
+ "v3/resources/daemonize-processes",
+ "v3/resources/big-data",
+ "v3/resources/cli-shell",
+ "v3/resources/secrets"
],
- "version": "3.0"
+ "version": "v3"
},
{
"group": "Integrations",
@@ -350,339 +372,339 @@
{
"group": "APIs & SDK",
"pages": [
- "3.0/api-ref/index",
- "3.0/api-ref/python/index",
+ "v3/api-ref/index",
+ "v3/api-ref/python/index",
{
"group": "REST API",
"pages": [
- "3.0/api-ref/rest-api/index",
+ "v3/api-ref/rest-api/index",
{
"group": "Cloud API",
"pages": [
- "3.0/api-ref/rest-api/cloud/index"
+ "v3/api-ref/rest-api/cloud/index"
],
- "version": "3.0"
+ "version": "v3"
},
{
"group": "Server API",
"pages": [
- "3.0/api-ref/rest-api/server/index",
+ "v3/api-ref/rest-api/server/index",
{
"group": "Root",
"pages": [
- "3.0/api-ref/rest-api/server/root/health-check",
- "3.0/api-ref/rest-api/server/root/server-version",
- "3.0/api-ref/rest-api/server/root/hello",
- "3.0/api-ref/rest-api/server/root/perform-readiness-check"
+ "v3/api-ref/rest-api/server/root/health-check",
+ "v3/api-ref/rest-api/server/root/server-version",
+ "v3/api-ref/rest-api/server/root/hello",
+ "v3/api-ref/rest-api/server/root/perform-readiness-check"
]
},
{
"group": "Flows",
"pages": [
- "3.0/api-ref/rest-api/server/flows/create-flow",
- "3.0/api-ref/rest-api/server/flows/read-flow",
- "3.0/api-ref/rest-api/server/flows/delete-flow",
- "3.0/api-ref/rest-api/server/flows/update-flow",
- "3.0/api-ref/rest-api/server/flows/count-flows",
- "3.0/api-ref/rest-api/server/flows/read-flow-by-name",
- "3.0/api-ref/rest-api/server/flows/read-flows",
- "3.0/api-ref/rest-api/server/flows/paginate-flows",
- "3.0/api-ref/rest-api/server/flows/count-deployments-by-flow",
- "3.0/api-ref/rest-api/server/flows/next-runs-by-flow"
+ "v3/api-ref/rest-api/server/flows/create-flow",
+ "v3/api-ref/rest-api/server/flows/read-flow",
+ "v3/api-ref/rest-api/server/flows/delete-flow",
+ "v3/api-ref/rest-api/server/flows/update-flow",
+ "v3/api-ref/rest-api/server/flows/count-flows",
+ "v3/api-ref/rest-api/server/flows/read-flow-by-name",
+ "v3/api-ref/rest-api/server/flows/read-flows",
+ "v3/api-ref/rest-api/server/flows/paginate-flows",
+ "v3/api-ref/rest-api/server/flows/count-deployments-by-flow",
+ "v3/api-ref/rest-api/server/flows/next-runs-by-flow"
]
},
{
"group": "Flow Runs",
"pages": [
- "3.0/api-ref/rest-api/server/flow-runs/create-flow-run",
- "3.0/api-ref/rest-api/server/flow-runs/read-flow-run",
- "3.0/api-ref/rest-api/server/flow-runs/delete-flow-run",
- "3.0/api-ref/rest-api/server/flow-runs/update-flow-run",
- "3.0/api-ref/rest-api/server/flow-runs/count-flow-runs",
- "3.0/api-ref/rest-api/server/flow-runs/average-flow-run-lateness",
- "3.0/api-ref/rest-api/server/flow-runs/flow-run-history",
- "3.0/api-ref/rest-api/server/flow-runs/read-flow-run-graph-v1",
- "3.0/api-ref/rest-api/server/flow-runs/read-flow-run-graph-v2",
- "3.0/api-ref/rest-api/server/flow-runs/resume-flow-run",
- "3.0/api-ref/rest-api/server/flow-runs/read-flow-runs",
- "3.0/api-ref/rest-api/server/flow-runs/set-flow-run-state",
- "3.0/api-ref/rest-api/server/flow-runs/create-flow-run-input",
- "3.0/api-ref/rest-api/server/flow-runs/filter-flow-run-input",
- "3.0/api-ref/rest-api/server/flow-runs/read-flow-run-input",
- "3.0/api-ref/rest-api/server/flow-runs/delete-flow-run-input",
- "3.0/api-ref/rest-api/server/flow-runs/paginate-flow-runs",
- "3.0/api-ref/rest-api/server/flow-runs/download-logs",
- "3.0/api-ref/rest-api/server/flow-runs/read-flow-run-history",
- "3.0/api-ref/rest-api/server/flow-runs/count-task-runs-by-flow-run"
+ "v3/api-ref/rest-api/server/flow-runs/create-flow-run",
+ "v3/api-ref/rest-api/server/flow-runs/read-flow-run",
+ "v3/api-ref/rest-api/server/flow-runs/delete-flow-run",
+ "v3/api-ref/rest-api/server/flow-runs/update-flow-run",
+ "v3/api-ref/rest-api/server/flow-runs/count-flow-runs",
+ "v3/api-ref/rest-api/server/flow-runs/average-flow-run-lateness",
+ "v3/api-ref/rest-api/server/flow-runs/flow-run-history",
+ "v3/api-ref/rest-api/server/flow-runs/read-flow-run-graph-v1",
+ "v3/api-ref/rest-api/server/flow-runs/read-flow-run-graph-v2",
+ "v3/api-ref/rest-api/server/flow-runs/resume-flow-run",
+ "v3/api-ref/rest-api/server/flow-runs/read-flow-runs",
+ "v3/api-ref/rest-api/server/flow-runs/set-flow-run-state",
+ "v3/api-ref/rest-api/server/flow-runs/create-flow-run-input",
+ "v3/api-ref/rest-api/server/flow-runs/filter-flow-run-input",
+ "v3/api-ref/rest-api/server/flow-runs/read-flow-run-input",
+ "v3/api-ref/rest-api/server/flow-runs/delete-flow-run-input",
+ "v3/api-ref/rest-api/server/flow-runs/paginate-flow-runs",
+ "v3/api-ref/rest-api/server/flow-runs/download-logs",
+ "v3/api-ref/rest-api/server/flow-runs/read-flow-run-history",
+ "v3/api-ref/rest-api/server/flow-runs/count-task-runs-by-flow-run"
]
},
{
"group": "Task Runs",
"pages": [
- "3.0/api-ref/rest-api/server/task-runs/create-task-run",
- "3.0/api-ref/rest-api/server/task-runs/read-task-run",
- "3.0/api-ref/rest-api/server/task-runs/delete-task-run",
- "3.0/api-ref/rest-api/server/task-runs/update-task-run",
- "3.0/api-ref/rest-api/server/task-runs/count-task-runs",
- "3.0/api-ref/rest-api/server/task-runs/task-run-history",
- "3.0/api-ref/rest-api/server/task-runs/read-task-runs",
- "3.0/api-ref/rest-api/server/task-runs/set-task-run-state",
- "3.0/api-ref/rest-api/server/task-runs/read-dashboard-task-run-counts",
- "3.0/api-ref/rest-api/server/task-runs/read-task-run-counts-by-state"
+ "v3/api-ref/rest-api/server/task-runs/create-task-run",
+ "v3/api-ref/rest-api/server/task-runs/read-task-run",
+ "v3/api-ref/rest-api/server/task-runs/delete-task-run",
+ "v3/api-ref/rest-api/server/task-runs/update-task-run",
+ "v3/api-ref/rest-api/server/task-runs/count-task-runs",
+ "v3/api-ref/rest-api/server/task-runs/task-run-history",
+ "v3/api-ref/rest-api/server/task-runs/read-task-runs",
+ "v3/api-ref/rest-api/server/task-runs/set-task-run-state",
+ "v3/api-ref/rest-api/server/task-runs/read-dashboard-task-run-counts",
+ "v3/api-ref/rest-api/server/task-runs/read-task-run-counts-by-state"
]
},
{
"group": "Flow Run States",
"pages": [
- "3.0/api-ref/rest-api/server/flow-run-states/read-flow-run-state",
- "3.0/api-ref/rest-api/server/flow-run-states/read-flow-run-states"
+ "v3/api-ref/rest-api/server/flow-run-states/read-flow-run-state",
+ "v3/api-ref/rest-api/server/flow-run-states/read-flow-run-states"
]
},
{
"group": "Task Run States",
"pages": [
- "3.0/api-ref/rest-api/server/task-run-states/read-task-run-state",
- "3.0/api-ref/rest-api/server/task-run-states/read-task-run-states"
+ "v3/api-ref/rest-api/server/task-run-states/read-task-run-state",
+ "v3/api-ref/rest-api/server/task-run-states/read-task-run-states"
]
},
{
"group": "Flow Run Notification Policies",
"pages": [
- "3.0/api-ref/rest-api/server/flow-run-notification-policies/create-flow-run-notification-policy",
- "3.0/api-ref/rest-api/server/flow-run-notification-policies/read-flow-run-notification-policy",
- "3.0/api-ref/rest-api/server/flow-run-notification-policies/delete-flow-run-notification-policy",
- "3.0/api-ref/rest-api/server/flow-run-notification-policies/update-flow-run-notification-policy",
- "3.0/api-ref/rest-api/server/flow-run-notification-policies/read-flow-run-notification-policies"
+ "v3/api-ref/rest-api/server/flow-run-notification-policies/create-flow-run-notification-policy",
+ "v3/api-ref/rest-api/server/flow-run-notification-policies/read-flow-run-notification-policy",
+ "v3/api-ref/rest-api/server/flow-run-notification-policies/delete-flow-run-notification-policy",
+ "v3/api-ref/rest-api/server/flow-run-notification-policies/update-flow-run-notification-policy",
+ "v3/api-ref/rest-api/server/flow-run-notification-policies/read-flow-run-notification-policies"
]
},
{
"group": "Deployments",
"pages": [
- "3.0/api-ref/rest-api/server/deployments/create-deployment",
- "3.0/api-ref/rest-api/server/deployments/read-deployment",
- "3.0/api-ref/rest-api/server/deployments/delete-deployment",
- "3.0/api-ref/rest-api/server/deployments/update-deployment",
- "3.0/api-ref/rest-api/server/deployments/read-deployment-by-name",
- "3.0/api-ref/rest-api/server/deployments/read-deployments",
- "3.0/api-ref/rest-api/server/deployments/paginate-deployments",
- "3.0/api-ref/rest-api/server/deployments/get-scheduled-flow-runs-for-deployments",
- "3.0/api-ref/rest-api/server/deployments/count-deployments",
- "3.0/api-ref/rest-api/server/deployments/schedule-deployment",
- "3.0/api-ref/rest-api/server/deployments/resume-deployment",
- "3.0/api-ref/rest-api/server/deployments/pause-deployment",
- "3.0/api-ref/rest-api/server/deployments/create-flow-run-from-deployment",
- "3.0/api-ref/rest-api/server/deployments/work-queue-check-for-deployment",
- "3.0/api-ref/rest-api/server/deployments/read-deployment-schedules",
- "3.0/api-ref/rest-api/server/deployments/create-deployment-schedules",
- "3.0/api-ref/rest-api/server/deployments/delete-deployment-schedule",
- "3.0/api-ref/rest-api/server/deployments/update-deployment-schedule"
+ "v3/api-ref/rest-api/server/deployments/create-deployment",
+ "v3/api-ref/rest-api/server/deployments/read-deployment",
+ "v3/api-ref/rest-api/server/deployments/delete-deployment",
+ "v3/api-ref/rest-api/server/deployments/update-deployment",
+ "v3/api-ref/rest-api/server/deployments/read-deployment-by-name",
+ "v3/api-ref/rest-api/server/deployments/read-deployments",
+ "v3/api-ref/rest-api/server/deployments/paginate-deployments",
+ "v3/api-ref/rest-api/server/deployments/get-scheduled-flow-runs-for-deployments",
+ "v3/api-ref/rest-api/server/deployments/count-deployments",
+ "v3/api-ref/rest-api/server/deployments/schedule-deployment",
+ "v3/api-ref/rest-api/server/deployments/resume-deployment",
+ "v3/api-ref/rest-api/server/deployments/pause-deployment",
+ "v3/api-ref/rest-api/server/deployments/create-flow-run-from-deployment",
+ "v3/api-ref/rest-api/server/deployments/work-queue-check-for-deployment",
+ "v3/api-ref/rest-api/server/deployments/read-deployment-schedules",
+ "v3/api-ref/rest-api/server/deployments/create-deployment-schedules",
+ "v3/api-ref/rest-api/server/deployments/delete-deployment-schedule",
+ "v3/api-ref/rest-api/server/deployments/update-deployment-schedule"
]
},
{
"group": "SavedSearches",
"pages": [
- "3.0/api-ref/rest-api/server/savedsearches/create-saved-search",
- "3.0/api-ref/rest-api/server/savedsearches/read-saved-search",
- "3.0/api-ref/rest-api/server/savedsearches/delete-saved-search",
- "3.0/api-ref/rest-api/server/savedsearches/read-saved-searches"
+ "v3/api-ref/rest-api/server/savedsearches/create-saved-search",
+ "v3/api-ref/rest-api/server/savedsearches/read-saved-search",
+ "v3/api-ref/rest-api/server/savedsearches/delete-saved-search",
+ "v3/api-ref/rest-api/server/savedsearches/read-saved-searches"
]
},
{
"group": "Logs",
"pages": [
- "3.0/api-ref/rest-api/server/logs/create-logs",
- "3.0/api-ref/rest-api/server/logs/read-logs"
+ "v3/api-ref/rest-api/server/logs/create-logs",
+ "v3/api-ref/rest-api/server/logs/read-logs"
]
},
{
"group": "Concurrency Limits",
"pages": [
- "3.0/api-ref/rest-api/server/concurrency-limits/create-concurrency-limit",
- "3.0/api-ref/rest-api/server/concurrency-limits/read-concurrency-limit",
- "3.0/api-ref/rest-api/server/concurrency-limits/delete-concurrency-limit",
- "3.0/api-ref/rest-api/server/concurrency-limits/read-concurrency-limit-by-tag",
- "3.0/api-ref/rest-api/server/concurrency-limits/delete-concurrency-limit-by-tag",
- "3.0/api-ref/rest-api/server/concurrency-limits/read-concurrency-limits",
- "3.0/api-ref/rest-api/server/concurrency-limits/reset-concurrency-limit-by-tag",
- "3.0/api-ref/rest-api/server/concurrency-limits/increment-concurrency-limits-v1",
- "3.0/api-ref/rest-api/server/concurrency-limits/decrement-concurrency-limits-v1"
+ "v3/api-ref/rest-api/server/concurrency-limits/create-concurrency-limit",
+ "v3/api-ref/rest-api/server/concurrency-limits/read-concurrency-limit",
+ "v3/api-ref/rest-api/server/concurrency-limits/delete-concurrency-limit",
+ "v3/api-ref/rest-api/server/concurrency-limits/read-concurrency-limit-by-tag",
+ "v3/api-ref/rest-api/server/concurrency-limits/delete-concurrency-limit-by-tag",
+ "v3/api-ref/rest-api/server/concurrency-limits/read-concurrency-limits",
+ "v3/api-ref/rest-api/server/concurrency-limits/reset-concurrency-limit-by-tag",
+ "v3/api-ref/rest-api/server/concurrency-limits/increment-concurrency-limits-v1",
+ "v3/api-ref/rest-api/server/concurrency-limits/decrement-concurrency-limits-v1"
]
},
{
"group": "Concurrency Limits V2",
"pages": [
- "3.0/api-ref/rest-api/server/concurrency-limits-v2/create-concurrency-limit-v2",
- "3.0/api-ref/rest-api/server/concurrency-limits-v2/read-concurrency-limit-v2",
- "3.0/api-ref/rest-api/server/concurrency-limits-v2/delete-concurrency-limit-v2",
- "3.0/api-ref/rest-api/server/concurrency-limits-v2/update-concurrency-limit-v2",
- "3.0/api-ref/rest-api/server/concurrency-limits-v2/read-all-concurrency-limits-v2",
- "3.0/api-ref/rest-api/server/concurrency-limits-v2/bulk-increment-active-slots",
- "3.0/api-ref/rest-api/server/concurrency-limits-v2/bulk-decrement-active-slots"
+ "v3/api-ref/rest-api/server/concurrency-limits-v2/create-concurrency-limit-v2",
+ "v3/api-ref/rest-api/server/concurrency-limits-v2/read-concurrency-limit-v2",
+ "v3/api-ref/rest-api/server/concurrency-limits-v2/delete-concurrency-limit-v2",
+ "v3/api-ref/rest-api/server/concurrency-limits-v2/update-concurrency-limit-v2",
+ "v3/api-ref/rest-api/server/concurrency-limits-v2/read-all-concurrency-limits-v2",
+ "v3/api-ref/rest-api/server/concurrency-limits-v2/bulk-increment-active-slots",
+ "v3/api-ref/rest-api/server/concurrency-limits-v2/bulk-decrement-active-slots"
]
},
{
"group": "Block types",
"pages": [
- "3.0/api-ref/rest-api/server/block-types/create-block-type",
- "3.0/api-ref/rest-api/server/block-types/read-block-type-by-id",
- "3.0/api-ref/rest-api/server/block-types/delete-block-type",
- "3.0/api-ref/rest-api/server/block-types/update-block-type",
- "3.0/api-ref/rest-api/server/block-types/read-block-type-by-slug",
- "3.0/api-ref/rest-api/server/block-types/read-block-types",
- "3.0/api-ref/rest-api/server/block-types/read-block-documents-for-block-type",
- "3.0/api-ref/rest-api/server/block-types/read-block-document-by-name-for-block-type",
- "3.0/api-ref/rest-api/server/block-types/install-system-block-types"
+ "v3/api-ref/rest-api/server/block-types/create-block-type",
+ "v3/api-ref/rest-api/server/block-types/read-block-type-by-id",
+ "v3/api-ref/rest-api/server/block-types/delete-block-type",
+ "v3/api-ref/rest-api/server/block-types/update-block-type",
+ "v3/api-ref/rest-api/server/block-types/read-block-type-by-slug",
+ "v3/api-ref/rest-api/server/block-types/read-block-types",
+ "v3/api-ref/rest-api/server/block-types/read-block-documents-for-block-type",
+ "v3/api-ref/rest-api/server/block-types/read-block-document-by-name-for-block-type",
+ "v3/api-ref/rest-api/server/block-types/install-system-block-types"
]
},
{
"group": "Block documents",
"pages": [
- "3.0/api-ref/rest-api/server/block-documents/create-block-document",
- "3.0/api-ref/rest-api/server/block-documents/read-block-documents",
- "3.0/api-ref/rest-api/server/block-documents/count-block-documents",
- "3.0/api-ref/rest-api/server/block-documents/read-block-document-by-id",
- "3.0/api-ref/rest-api/server/block-documents/delete-block-document",
- "3.0/api-ref/rest-api/server/block-documents/update-block-document-data"
+ "v3/api-ref/rest-api/server/block-documents/create-block-document",
+ "v3/api-ref/rest-api/server/block-documents/read-block-documents",
+ "v3/api-ref/rest-api/server/block-documents/count-block-documents",
+ "v3/api-ref/rest-api/server/block-documents/read-block-document-by-id",
+ "v3/api-ref/rest-api/server/block-documents/delete-block-document",
+ "v3/api-ref/rest-api/server/block-documents/update-block-document-data"
]
},
{
"group": "Work Pools",
"pages": [
- "3.0/api-ref/rest-api/server/work-pools/create-work-pool",
- "3.0/api-ref/rest-api/server/work-pools/read-work-pool",
- "3.0/api-ref/rest-api/server/work-pools/delete-work-pool",
- "3.0/api-ref/rest-api/server/work-pools/update-work-pool",
- "3.0/api-ref/rest-api/server/work-pools/read-work-pools",
- "3.0/api-ref/rest-api/server/work-pools/count-work-pools",
- "3.0/api-ref/rest-api/server/work-pools/get-scheduled-flow-runs",
- "3.0/api-ref/rest-api/server/work-pools/create-work-queue",
- "3.0/api-ref/rest-api/server/work-pools/read-work-queue",
- "3.0/api-ref/rest-api/server/work-pools/delete-work-queue",
- "3.0/api-ref/rest-api/server/work-pools/update-work-queue",
- "3.0/api-ref/rest-api/server/work-pools/read-work-queues",
- "3.0/api-ref/rest-api/server/work-pools/worker-heartbeat",
- "3.0/api-ref/rest-api/server/work-pools/read-workers",
- "3.0/api-ref/rest-api/server/work-pools/delete-worker"
+ "v3/api-ref/rest-api/server/work-pools/create-work-pool",
+ "v3/api-ref/rest-api/server/work-pools/read-work-pool",
+ "v3/api-ref/rest-api/server/work-pools/delete-work-pool",
+ "v3/api-ref/rest-api/server/work-pools/update-work-pool",
+ "v3/api-ref/rest-api/server/work-pools/read-work-pools",
+ "v3/api-ref/rest-api/server/work-pools/count-work-pools",
+ "v3/api-ref/rest-api/server/work-pools/get-scheduled-flow-runs",
+ "v3/api-ref/rest-api/server/work-pools/create-work-queue",
+ "v3/api-ref/rest-api/server/work-pools/read-work-queue",
+ "v3/api-ref/rest-api/server/work-pools/delete-work-queue",
+ "v3/api-ref/rest-api/server/work-pools/update-work-queue",
+ "v3/api-ref/rest-api/server/work-pools/read-work-queues",
+ "v3/api-ref/rest-api/server/work-pools/worker-heartbeat",
+ "v3/api-ref/rest-api/server/work-pools/read-workers",
+ "v3/api-ref/rest-api/server/work-pools/delete-worker"
]
},
{
"group": "Task Workers",
"pages": [
- "3.0/api-ref/rest-api/server/task-workers/read-task-workers"
+ "v3/api-ref/rest-api/server/task-workers/read-task-workers"
]
},
{
"group": "Work Queues",
"pages": [
- "3.0/api-ref/rest-api/server/work-queues/create-work-queue",
- "3.0/api-ref/rest-api/server/work-queues/read-work-queue",
- "3.0/api-ref/rest-api/server/work-queues/delete-work-queue",
- "3.0/api-ref/rest-api/server/work-queues/update-work-queue",
- "3.0/api-ref/rest-api/server/work-queues/read-work-queue-by-name",
- "3.0/api-ref/rest-api/server/work-queues/read-work-queue-runs",
- "3.0/api-ref/rest-api/server/work-queues/read-work-queues",
- "3.0/api-ref/rest-api/server/work-queues/read-work-queue-status"
+ "v3/api-ref/rest-api/server/work-queues/create-work-queue",
+ "v3/api-ref/rest-api/server/work-queues/read-work-queue",
+ "v3/api-ref/rest-api/server/work-queues/delete-work-queue",
+ "v3/api-ref/rest-api/server/work-queues/update-work-queue",
+ "v3/api-ref/rest-api/server/work-queues/read-work-queue-by-name",
+ "v3/api-ref/rest-api/server/work-queues/read-work-queue-runs",
+ "v3/api-ref/rest-api/server/work-queues/read-work-queues",
+ "v3/api-ref/rest-api/server/work-queues/read-work-queue-status"
]
},
{
"group": "Artifacts",
"pages": [
- "3.0/api-ref/rest-api/server/artifacts/create-artifact",
- "3.0/api-ref/rest-api/server/artifacts/read-artifact",
- "3.0/api-ref/rest-api/server/artifacts/delete-artifact",
- "3.0/api-ref/rest-api/server/artifacts/update-artifact",
- "3.0/api-ref/rest-api/server/artifacts/read-latest-artifact",
- "3.0/api-ref/rest-api/server/artifacts/read-artifacts",
- "3.0/api-ref/rest-api/server/artifacts/read-latest-artifacts",
- "3.0/api-ref/rest-api/server/artifacts/count-artifacts",
- "3.0/api-ref/rest-api/server/artifacts/count-latest-artifacts"
+ "v3/api-ref/rest-api/server/artifacts/create-artifact",
+ "v3/api-ref/rest-api/server/artifacts/read-artifact",
+ "v3/api-ref/rest-api/server/artifacts/delete-artifact",
+ "v3/api-ref/rest-api/server/artifacts/update-artifact",
+ "v3/api-ref/rest-api/server/artifacts/read-latest-artifact",
+ "v3/api-ref/rest-api/server/artifacts/read-artifacts",
+ "v3/api-ref/rest-api/server/artifacts/read-latest-artifacts",
+ "v3/api-ref/rest-api/server/artifacts/count-artifacts",
+ "v3/api-ref/rest-api/server/artifacts/count-latest-artifacts"
]
},
{
"group": "Block schemas",
"pages": [
- "3.0/api-ref/rest-api/server/block-schemas/create-block-schema",
- "3.0/api-ref/rest-api/server/block-schemas/read-block-schema-by-id",
- "3.0/api-ref/rest-api/server/block-schemas/delete-block-schema",
- "3.0/api-ref/rest-api/server/block-schemas/read-block-schemas",
- "3.0/api-ref/rest-api/server/block-schemas/read-block-schema-by-checksum"
+ "v3/api-ref/rest-api/server/block-schemas/create-block-schema",
+ "v3/api-ref/rest-api/server/block-schemas/read-block-schema-by-id",
+ "v3/api-ref/rest-api/server/block-schemas/delete-block-schema",
+ "v3/api-ref/rest-api/server/block-schemas/read-block-schemas",
+ "v3/api-ref/rest-api/server/block-schemas/read-block-schema-by-checksum"
]
},
{
"group": "Block capabilities",
"pages": [
- "3.0/api-ref/rest-api/server/block-capabilities/read-available-block-capabilities"
+ "v3/api-ref/rest-api/server/block-capabilities/read-available-block-capabilities"
]
},
{
"group": "Collections",
"pages": [
- "3.0/api-ref/rest-api/server/collections/read-view-content"
+ "v3/api-ref/rest-api/server/collections/read-view-content"
]
},
{
"group": "Variables",
"pages": [
- "3.0/api-ref/rest-api/server/variables/create-variable",
- "3.0/api-ref/rest-api/server/variables/read-variable",
- "3.0/api-ref/rest-api/server/variables/delete-variable",
- "3.0/api-ref/rest-api/server/variables/update-variable",
- "3.0/api-ref/rest-api/server/variables/read-variable-by-name",
- "3.0/api-ref/rest-api/server/variables/delete-variable-by-name",
- "3.0/api-ref/rest-api/server/variables/update-variable-by-name",
- "3.0/api-ref/rest-api/server/variables/read-variables",
- "3.0/api-ref/rest-api/server/variables/count-variables"
+ "v3/api-ref/rest-api/server/variables/create-variable",
+ "v3/api-ref/rest-api/server/variables/read-variable",
+ "v3/api-ref/rest-api/server/variables/delete-variable",
+ "v3/api-ref/rest-api/server/variables/update-variable",
+ "v3/api-ref/rest-api/server/variables/read-variable-by-name",
+ "v3/api-ref/rest-api/server/variables/delete-variable-by-name",
+ "v3/api-ref/rest-api/server/variables/update-variable-by-name",
+ "v3/api-ref/rest-api/server/variables/read-variables",
+ "v3/api-ref/rest-api/server/variables/count-variables"
]
},
- "3.0/api-ref/rest-api/server/create-csrf-token",
+ "v3/api-ref/rest-api/server/create-csrf-token",
{
"group": "Events",
"pages": [
- "3.0/api-ref/rest-api/server/events/create-events",
- "3.0/api-ref/rest-api/server/events/read-events",
- "3.0/api-ref/rest-api/server/events/read-account-events-page",
- "3.0/api-ref/rest-api/server/events/count-account-events"
+ "v3/api-ref/rest-api/server/events/create-events",
+ "v3/api-ref/rest-api/server/events/read-events",
+ "v3/api-ref/rest-api/server/events/read-account-events-page",
+ "v3/api-ref/rest-api/server/events/count-account-events"
]
},
{
"group": "Automations",
"pages": [
- "3.0/api-ref/rest-api/server/automations/create-automation",
- "3.0/api-ref/rest-api/server/automations/read-automation",
- "3.0/api-ref/rest-api/server/automations/update-automation",
- "3.0/api-ref/rest-api/server/automations/delete-automation",
- "3.0/api-ref/rest-api/server/automations/patch-automation",
- "3.0/api-ref/rest-api/server/automations/read-automations",
- "3.0/api-ref/rest-api/server/automations/count-automations",
- "3.0/api-ref/rest-api/server/automations/read-automations-related-to-resource",
- "3.0/api-ref/rest-api/server/automations/delete-automations-owned-by-resource",
- "3.0/api-ref/rest-api/server/automations/validate-template"
+ "v3/api-ref/rest-api/server/automations/create-automation",
+ "v3/api-ref/rest-api/server/automations/read-automation",
+ "v3/api-ref/rest-api/server/automations/update-automation",
+ "v3/api-ref/rest-api/server/automations/delete-automation",
+ "v3/api-ref/rest-api/server/automations/patch-automation",
+ "v3/api-ref/rest-api/server/automations/read-automations",
+ "v3/api-ref/rest-api/server/automations/count-automations",
+ "v3/api-ref/rest-api/server/automations/read-automations-related-to-resource",
+ "v3/api-ref/rest-api/server/automations/delete-automations-owned-by-resource",
+ "v3/api-ref/rest-api/server/automations/validate-template"
]
},
{
"group": "UI",
"pages": [
- "3.0/api-ref/rest-api/server/ui/validate-obj"
+ "v3/api-ref/rest-api/server/ui/validate-obj"
]
},
{
"group": "Admin",
"pages": [
- "3.0/api-ref/rest-api/server/admin/read-settings",
- "3.0/api-ref/rest-api/server/admin/read-version",
- "3.0/api-ref/rest-api/server/admin/clear-database",
- "3.0/api-ref/rest-api/server/admin/drop-database",
- "3.0/api-ref/rest-api/server/admin/create-database"
+ "v3/api-ref/rest-api/server/admin/read-settings",
+ "v3/api-ref/rest-api/server/admin/read-version",
+ "v3/api-ref/rest-api/server/admin/clear-database",
+ "v3/api-ref/rest-api/server/admin/drop-database",
+ "v3/api-ref/rest-api/server/admin/create-database"
]
}
],
- "version": "3.0"
+ "version": "v3"
}
],
- "version": "3.0"
+ "version": "v3"
}
],
- "version": "3.0"
+ "version": "v3"
},
{
"group": "Contribute",
@@ -698,187 +720,191 @@
],
"redirects": [
{
- "destination": "/latest/getting-started/:slug*",
+ "destination": "/v3/getting-started/:slug*",
"source": "/getting-started/:slug*"
},
{
- "destination": "/latest/tutorial/:slug*",
+ "destination": "/v3/tutorial/:slug*",
"source": "/tutorial/:slug*"
},
{
- "destination": "/latest/guides/:slug*",
+ "destination": "/v3/guides/:slug*",
"source": "/guides/:slug*"
},
{
- "destination": "/latest/concepts/:slug*",
+ "destination": "/v3/concepts/:slug*",
"source": "/concepts/:slug*"
},
{
- "destination": "/latest/cloud/:slug*",
+ "destination": "/v3/cloud/:slug*",
"source": "/cloud/:slug*"
},
{
- "destination": "/latest/api-ref/:slug*",
+ "destination": "/v3/api-ref/:slug*",
"source": "/api-ref/:slug*"
},
{
- "destination": "/latest/get-started",
+ "destination": "/v3/get-started",
"source": "/latest/tutorial"
},
{
- "destination": "/latest/get-started",
+ "destination": "/v3/get-started",
"source": "/latest/getting-started/quickstart"
},
{
- "destination": "/latest/develop/write-flows",
+ "destination": "/v3/develop/write-flows",
"source": "/latest/concepts/flows"
},
{
- "destination": "/latest/deploy",
+ "destination": "/v3/deploy",
"source": "/latest/concepts/deployments"
},
{
- "destination": "/latest/get-started/install",
+ "destination": "/v3/get-started/install",
"source": "/latest/getting-started/installation"
},
{
- "destination": "/latest/deploy/infrastructure-concepts/work-pools",
+ "destination": "/v3/deploy/infrastructure-concepts/work-pools",
"source": "/latest/concepts/work-pools"
},
{
- "destination": "/latest/manage/self-host",
+ "destination": "/v3/manage/self-host",
"source": "/latest/guides/host"
},
{
- "destination": "/latest/develop/write-tasks",
+ "destination": "/v3/develop/write-tasks",
"source": "/latest/concepts/tasks"
},
{
- "destination": "/latest",
+ "destination": "/v3/develop/settings-and-profiles",
+ "source": "/v3/develop/settings-and-profiles"
+ },
+ {
+ "destination": "/v3",
"source": "/latest/guides"
},
{
- "destination": "/latest/deploy",
+ "destination": "/v3/deploy",
"source": "/latest/tutorial/deployments"
},
{
- "destination": "/latest/develop/write-flows",
+ "destination": "/v3/develop/write-flows",
"source": "/latest/tutorial/flows"
},
{
- "destination": "/latest",
+ "destination": "/v3",
"source": "/latest/concepts"
},
{
- "destination": "/latest/deploy",
+ "destination": "/v3/deploy",
"source": "/latest/guides/prefect-deploy"
},
{
- "destination": "/latest/develop/write-tasks",
+ "destination": "/v3/develop/write-tasks",
"source": "/latest/tutorial/tasks"
},
{
- "destination": "/latest/automate/add-schedules",
+ "destination": "/v3/automate/add-schedules",
"source": "/latest/concepts/schedules"
},
{
- "destination": "/latest/deploy/infrastructure-examples/docker",
+ "destination": "/v3/deploy/infrastructure-examples/docker",
"source": "/latest/guides/docker"
},
{
- "destination": "/latest/deploy/infrastructure-concepts/work-pools",
+ "destination": "/v3/deploy/infrastructure-concepts/work-pools",
"source": "/latest/tutorial/work-pools"
},
{
- "destination": "/latest/manage/settings-and-profiles",
+ "destination": "/v3/develop/settings-and-profiles",
"source": "/latest/guides/settings"
},
{
- "destination": "/latest/develop/task-runners",
+ "destination": "/v3/develop/task-runners",
"source": "/latest/concepts/task-runners"
},
{
- "destination": "/latest/deploy/infrastructure-concepts/workers",
+ "destination": "/v3/deploy/infrastructure-concepts/workers",
"source": "/latest/tutorial/workers"
},
{
- "destination": "/latest/manage/cloud",
+ "destination": "/v3/manage/cloud",
"source": "/latest/cloud"
},
{
- "destination": "/latest/integrations",
+ "destination": "/v3/integrations",
"source": "/latest/integrations"
},
{
- "destination": "/latest/develop/results",
+ "destination": "/v3/develop/results",
"source": "/latest/concepts/results"
},
{
- "destination": "/latest/automate",
+ "destination": "/v3/automate",
"source": "/latest/concepts/automations"
},
{
- "destination": "/latest/develop/logging",
+ "destination": "/v3/develop/logging",
"source": "/latest/guides/logs"
},
{
- "destination": "/latest/develop/blocks",
+ "destination": "/v3/develop/blocks",
"source": "/latest/concepts/blocks"
},
{
- "destination": "/latest/deploy/infrastructure-examples/kubernetes",
+ "destination": "/v3/deploy/infrastructure-examples/kubernetes",
"source": "/latest/guides/deployment/kubernetes"
},
{
- "destination": "/latest/develop/manage-states",
+ "destination": "/v3/develop/manage-states",
"source": "/latest/concepts/states"
},
{
- "destination": "/latest/resources/upgrade-agents-to-workers",
+ "destination": "/v3/resources/upgrade-agents-to-workers",
"source": "/latest/guides/upgrade-guide-agents-to-workers"
},
{
- "destination": "/latest/automate/events/webhook-triggers",
+ "destination": "/v3/automate/events/webhook-triggers",
"source": "/latest/guides/webhooks"
},
{
- "destination": "/latest/automate/events/webhook-triggers",
+ "destination": "/v3/automate/events/webhook-triggers",
"source": "/latest/cloud/webhooks"
},
{
- "destination": "/latest/develop/artifacts",
+ "destination": "/v3/develop/artifacts",
"source": "/latest/concepts/artifacts"
},
{
- "destination": "/latest/api-ref/rest-api",
+ "destination": "/v3/api-ref/rest-api",
"source": "/latest/api-ref/rest-api-reference"
},
{
- "destination": "/latest/develop/global-concurrency-limits",
+ "destination": "/v3/develop/global-concurrency-limits",
"source": "/latest/guides/global-concurrency-limits"
},
{
- "destination": "/latest/develop/variables",
+ "destination": "/v3/develop/variables",
"source": "/latest/guides/variables"
},
{
- "destination": "/latest/api-ref",
+ "destination": "/v3/api-ref",
"source": "/latest/guides/using-the-client"
},
{
- "destination": "/latest/deploy/infrastructure-concepts/store-flow-code",
+ "destination": "/v3/deploy/infrastructure-concepts/store-flow-code",
"source": "/latest/guides/deployment/storage-guide"
},
{
- "destination": "/latest/develop/test-workflows",
+ "destination": "/v3/develop/test-workflows",
"source": "/latest/guides/testing"
},
{
- "destination": "/latest/deploy/infrastructure-examples/serverless",
+ "destination": "/v3/deploy/infrastructure-examples/serverless",
"source": "/latest/guides/deployment/push-work-pools"
},
{
- "destination": "/latest/develop/inputs",
+ "destination": "/v3/develop/inputs",
"source": "/latest/guides/creating-interactive-workflows"
},
{
@@ -886,39 +912,39 @@
"source": "/latest/guides/dask-ray-task-runners"
},
{
- "destination": "/latest/deploy/infrastructure-concepts/deploy-ci-cd",
+ "destination": "/v3/deploy/infrastructure-concepts/deploy-ci-cd",
"source": "/latest/guides/ci-cd"
},
{
- "destination": "/latest/resources/upgrade-agents-to-workers",
+ "destination": "/v3/resources/upgrade-agents-to-workers",
"source": "/latest/concepts/deployments-block-based"
},
{
- "destination": "/latest/deploy/infrastructure-concepts/work-pools",
+ "destination": "/v3/deploy/infrastructure-concepts/work-pools",
"source": "/latest/concepts/infrastructure"
},
{
- "destination": "/latest/develop/runtime-context",
+ "destination": "/v3/develop/runtime-context",
"source": "/latest/guides/runtime-context"
},
{
- "destination": "/latest/deploy/infrastructure-concepts/customize",
+ "destination": "/v3/deploy/infrastructure-concepts/customize",
"source": "/latest/guides/deployment/overriding-job-variables"
},
{
- "destination": "/latest/deploy/infrastructure-examples/managed",
+ "destination": "/v3/deploy/infrastructure-examples/managed",
"source": "/latest/guides/managed-execution"
},
{
- "destination": "/latest/develop/write-flows",
+ "destination": "/v3/develop/write-flows",
"source": "/latest/guides/specifying-upstream-dependencies"
},
{
- "destination": "/latest/automate",
+ "destination": "/v3/automate",
"source": "/latest/guides/automations"
},
{
- "destination": "/latest/resources/upgrade-agents-to-workers",
+ "destination": "/v3/resources/upgrade-agents-to-workers",
"source": "/latest/concepts/agents"
},
{
@@ -926,43 +952,43 @@
"source": "/latest/integrations/prefect-aws"
},
{
- "destination": "/latest/deploy/infrastructure-concepts/store-flow-code",
+ "destination": "/v3/deploy/infrastructure-concepts/store-flow-code",
"source": "/latest/concepts/storage"
},
{
- "destination": "/latest/resources/daemonize-processes",
+ "destination": "/v3/resources/daemonize-processes",
"source": "/latest/guides/deployment/daemonize"
},
{
- "destination": "/latest/manage/cloud/manage-users/api-keys",
+ "destination": "/v3/manage/cloud/manage-users/api-keys",
"source": "/latest/cloud/users/api-keys"
},
{
- "destination": "/latest/contribute",
+ "destination": "/v3/contribute",
"source": "/latest/community"
},
{
- "destination": "/latest/develop/blocks",
+ "destination": "/v3/develop/blocks",
"source": "/latest/concepts/filesystems"
},
{
- "destination": "/latest/events",
+ "destination": "/v3/events",
"source": "/latest/cloud/events"
},
{
- "destination": "/latest",
+ "destination": "/v3",
"source": "/latest/recipes/recipes"
},
{
- "destination": "/latest/resources/cli-shell",
+ "destination": "/v3/resources/cli-shell",
"source": "/latest/guides/cli-shell"
},
{
- "destination": "/latest/api-ref/rest-api/server",
+ "destination": "/v3/api-ref/rest-api/server",
"source": "/latest/api-ref/server"
},
{
- "destination": "/latest/develop/blocks",
+ "destination": "/v3/develop/blocks",
"source": "/latest/guides/moving-data"
},
{
@@ -970,11 +996,11 @@
"source": "/latest/guides/deployment/developing-a-new-worker-type"
},
{
- "destination": "/latest/develop/manage-states",
+ "destination": "/v3/develop/manage-states",
"source": "/latest/guides/state-change-hooks"
},
{
- "destination": "/latest/deploy/infrastructure-examples/serverless",
+ "destination": "/v3/deploy/infrastructure-examples/serverless",
"source": "/latest/guides/deployment/serverless-workers"
},
{
@@ -982,15 +1008,15 @@
"source": "/latest/integrations/usage"
},
{
- "destination": "/latest/manage/cloud/troubleshoot-cloud",
+ "destination": "/v3/manage/cloud/troubleshoot-cloud",
"source": "/latest/guides/troubleshooting"
},
{
- "destination": "/latest/resources/big-data",
+ "destination": "/v3/resources/big-data",
"source": "/latest/guides/big-data"
},
{
- "destination": "/latest/manage/cloud/workspaces",
+ "destination": "/v3/manage/cloud/workspaces",
"source": "/latest/cloud/workspaces"
},
{
@@ -1002,11 +1028,11 @@
"source": "/latest/integrations/prefect-kubernetes"
},
{
- "destination": "/latest/resources/whats-new-prefect-3.mdx",
+ "destination": "/v3/resources/whats-new-prefect-3.mdx",
"source": "/latest/faq"
},
{
- "destination": "/latest/resources/upgrade-prefect-3",
+ "destination": "/v3/resources/upgrade-prefect-3",
"source": "/latest/guides/migration-guide"
},
{
@@ -1018,19 +1044,19 @@
"source": "/latest/integrations/prefect-aws/ecs_worker"
},
{
- "destination": "/latest/contribute",
+ "destination": "/v3/contribute",
"source": "/latest/contributing/overview"
},
{
- "destination": "/latest/resources/secrets",
+ "destination": "/v3/resources/secrets",
"source": "/latest/guides/secrets"
},
{
- "destination": "/latest/manage/cloud/connect-to-cloud",
+ "destination": "/v3/manage/cloud/connect-to-cloud",
"source": "/latest/cloud/connecting"
},
{
- "destination": "/latest/manage/cloud/manage-users/manage-roles",
+ "destination": "/v3/manage/cloud/manage-users/manage-roles",
"source": "/latest/cloud/users/roles"
},
{
@@ -1038,12 +1064,12 @@
"source": "/latest/integrations/prefect-docker"
},
{
- "destination": "/latest/manage/cloud/manage-users",
+ "destination": "/v3/manage/cloud/manage-users",
"source": "/latest/cloud/users"
},
{
"destination": "/integrations/prefect-gcp",
- "source": "/latest/integrations/prefect-gcp"
+ "source": "/v3/integrations/prefect-gcp"
},
{
"destination": "/latest/manage/cloud/manage-users/configure-sso",
@@ -1058,7 +1084,7 @@
"source": "/latest/integrations/prefect-gcp/gcp-worker-guide"
},
{
- "destination": "/latest/manage/cloud/rate-limits",
+ "destination": "/v3/manage/cloud/rate-limits",
"source": "/latest/cloud/rate-limits"
},
{
@@ -1098,7 +1124,7 @@
"source": "/latest/guides/deployment/aci"
},
{
- "destination": "/latest/api-ref/python",
+ "destination": "/v3/api-ref/python",
"source": "/latest/api-ref/server/api/deployments"
},
{
@@ -1110,7 +1136,7 @@
"source": "/latest/integrations/prefect-aws/s3"
},
{
- "destination": "/latest/api-ref",
+ "destination": "/v3/api-ref",
"source": "/latest/api-ref/server/api/flow_runs"
},
{
@@ -1146,7 +1172,7 @@
"source": "/latest/integrations/prefect-docker/worker"
},
{
- "destination": "/latest/api-ref/python",
+ "destination": "/v3/api-ref/python",
"source": "/latest/api-ref/server/schemas/schedules"
},
{
@@ -1158,11 +1184,11 @@
"source": "/latest/integrations/prefect-ray"
},
{
- "destination": "/latest/deploy/infrastructure-examples/kubernetes",
+ "destination": "/v3/deploy/infrastructure-examples/kubernetes",
"source": "/implementation/azure/aks"
},
{
- "destination": "/latest/api-ref/python",
+ "destination": "/v3/api-ref/python",
"source": "/latest/api-ref/server/models/flow_runs"
},
{
@@ -1202,11 +1228,11 @@
"source": "/latest/integrations/prefect-docker/containers"
},
{
- "destination": "/latest/api-ref/rest-api/server/flows",
+ "destination": "/v3/api-ref/rest-api/server/flows",
"source": "/latest/api-ref/server/api/flows"
},
{
- "destination": "/latest/api-ref",
+ "destination": "/v3/api-ref",
"source": "/latest/api-ref/server/schemas/filters"
},
{
@@ -1226,15 +1252,15 @@
"source": "/latest/integrations/prefect-gcp/cloud_storage"
},
{
- "destination": "/latest/api-ref",
+ "destination": "/v3/api-ref",
"source": "/latest/api-ref/server/api/admin"
},
{
- "destination": "/latest/api-ref",
+ "destination": "/v3/api-ref",
"source": "/latest/api-ref/server/api/server"
},
{
- "destination": "/latest/manage/cloud/manage-users/object-access-control-lists",
+ "destination": "/v3/manage/cloud/manage-users/object-access-control-lists",
"source": "/latest/cloud/users/object-access-control-lists"
},
{
@@ -1286,26 +1312,38 @@
"source": "/latest/integrations/prefect-slack/messages"
},
{
- "destination": "/latest/api-ref/python",
+ "destination": "/v3/api-ref/python",
"source": "/latest/api-ref/prefect/:slug*"
},
{
- "destination": "/latest/api-ref/python",
+ "destination": "/v3/api-ref/python",
"source": "/api-ref/prefect/:slug*"
},
{
- "destination": "/3.0/:slug*",
- "source": "/3.0rc/:slug*"
+ "destination": "/v3/:slug*",
+ "source": "/3rc/:slug*"
},
{
- "destination": "/3.0/:slug*",
+ "destination": "/v3/:slug*",
"source": "/latest/:slug*"
+ },
+ {
+ "destination": "/v3/:slug*",
+ "source": "/3/:slug*"
+ },
+ {
+ "destination": "/v3/:slug*",
+ "source": "/3.0/:slug*"
+ },
+ {
+ "destination": "/v2/:slug*",
+ "source": "/2/:slug*"
}
],
"tabs": [
{
"name": "APIs & SDK",
- "url": "3.0/api-ref"
+ "url": "v3/api-ref"
},
{
"name": "Integrations",
@@ -1321,7 +1359,7 @@
"url": "https://github.com/PrefectHQ/Prefect"
},
"versions": [
- "3.0",
- "2.x"
+ "v3",
+ "v2"
]
}
\ No newline at end of file
diff --git a/docs/2.x/get-started/index.mdx b/docs/v2/get-started/index.mdx
similarity index 100%
rename from docs/2.x/get-started/index.mdx
rename to docs/v2/get-started/index.mdx
diff --git a/docs/v3/api-ref/index.mdx b/docs/v3/api-ref/index.mdx
new file mode 100644
index 000000000000..6aa570e07647
--- /dev/null
+++ b/docs/v3/api-ref/index.mdx
@@ -0,0 +1,19 @@
+---
+title: API & SDK References
+sidebarTitle: Overview
+description: Explore Prefect's auto-generated API & SDK reference documentation.
+---
+
+Prefect auto-generates reference documentation for the following components:
+
+- **[Prefect Python SDK](https://prefect-python-sdk-docs.netlify.app/)**: used to build, test, and execute workflows.
+- **[Prefect REST API](/v3/api-ref/rest-api)**: used by workflow clients and the Prefect UI for orchestration and data retrieval.
+ - [Prefect Cloud REST API documentation](https://app.prefect.cloud/api/docs).
+ - Self-hosted Prefect server [REST API documentation](/v3/api-ref/rest-api/server/). If you self-host a Prefect server instance, this reference is also served at the `/docs` endpoint of your [`PREFECT_API_URL`](/v3/develop/settings-and-profiles/); for example, after running `prefect server start` with no additional configuration, you can find it at [http://localhost:4200/docs](http://localhost:4200/docs).
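+
+As a quick sanity check (a minimal sketch, assuming a locally running `prefect server start` on the default port), you can confirm the REST API is up before browsing its reference documentation:
+
+```bash
+# GET /api/health returns `true` while the API is up (default local PREFECT_API_URL)
+curl http://localhost:4200/api/health
+```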
diff --git a/docs/3.0/api-ref/python/index.mdx b/docs/v3/api-ref/python/index.mdx
similarity index 100%
rename from docs/3.0/api-ref/python/index.mdx
rename to docs/v3/api-ref/python/index.mdx
diff --git a/docs/3.0/api-ref/rest-api/cloud/index.mdx b/docs/v3/api-ref/rest-api/cloud/index.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/cloud/index.mdx
rename to docs/v3/api-ref/rest-api/cloud/index.mdx
diff --git a/docs/3.0/api-ref/rest-api/index.mdx b/docs/v3/api-ref/rest-api/index.mdx
similarity index 94%
rename from docs/3.0/api-ref/rest-api/index.mdx
rename to docs/v3/api-ref/rest-api/index.mdx
index edfb10f70e76..68127ce67dc6 100644
--- a/docs/3.0/api-ref/rest-api/index.mdx
+++ b/docs/v3/api-ref/rest-api/index.mdx
@@ -11,13 +11,13 @@ Prefect Cloud and self-hosted Prefect server each provide a REST API.
- [Interactive Prefect Cloud REST API documentation](https://app.prefect.cloud/api/docs)
- [Finding your Prefect Cloud details](#finding-your-prefect-cloud-details)
- Self-hosted Prefect server:
- - Interactive REST API documentation for self-hosted Prefect server is available under **Server API** on the sidebar navigation or at `http://localhost:4200/docs` or the `/docs` endpoint of the [PREFECT_API_URL](/3.0/manage/settings-and-profiles/) you have configured to access the server. You must have the server running with `prefect server start` to access the interactive documentation.
+ - Interactive REST API documentation for a self-hosted Prefect server is available under **Server API** in the sidebar navigation, or at the `/docs` endpoint of the [PREFECT_API_URL](/v3/develop/settings-and-profiles/) you have configured to access the server (`http://localhost:4200/docs` by default). The server must be running (for example, via `prefect server start`) before the interactive documentation is accessible.
## Interact with the REST API
You can interact with the Prefect REST API in several ways:
-- Create an instance of [`PrefectClient`](https://prefect-python-sdk-docs.netlify.app/prefect/client/orchestration/#prefect.client.orchestration.PrefectClient), which is part of the [Prefect Python SDK](/3.0/api-ref/python/).
+ - Create an instance of [`PrefectClient`](https://prefect-python-sdk-docs.netlify.app/prefect/client/orchestration/#prefect.client.orchestration.PrefectClient), which is part of the [Prefect Python SDK](/v3/api-ref/python/) (see the sketch below).
- Use your favorite Python HTTP library such as [Requests](https://requests.readthedocs.io/en/latest/) or [HTTPX](https://www.python-httpx.org/)
- Use an HTTP library in your language of choice
- Use [curl](https://curl.se/) from the command line
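+
+For example, here is a minimal sketch of the first option (assuming Prefect 3 is installed and `PREFECT_API_URL` points at your server or workspace):
+
+```python
+import asyncio
+
+from prefect import get_client
+
+
+async def main():
+    async with get_client() as client:
+        # read_flows() issues a POST to the /flows/filter endpoint under the hood
+        flows = await client.read_flows(limit=5)
+        for flow in flows:
+            print(flow.id, flow.name)
+
+
+asyncio.run(main())
+```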
@@ -109,7 +109,7 @@ Note that in this example `--data-raw "{}"` is required and is where you can spe
## Finding your Prefect Cloud details
-When working with the Prefect Cloud REST API you will need your Account ID and often the Workspace ID for the [workspace](/3.0/manage/cloud/workspaces/) you want to interact with. You can find both IDs for a [Prefect profile](/3.0/manage/settings-and-profiles/) in the CLI with `prefect profile inspect my_profile`. This command will also display your [Prefect API key](/3.0/manage/cloud/manage-users/api-keys/), as shown below:
+When working with the Prefect Cloud REST API, you will need your Account ID and often the Workspace ID for the [workspace](/v3/manage/cloud/workspaces/) you want to interact with. You can find both IDs for a [Prefect profile](/v3/develop/settings-and-profiles/) in the CLI with `prefect profile inspect my_profile`. This command also displays your [Prefect API key](/v3/manage/cloud/manage-users/api-keys/), as shown below:
```bash
PREFECT_API_URL='https://api.prefect.cloud/api/accounts/abc-my-account-id-is-here/workspaces/123-my-workspace-id-is-here'
diff --git a/docs/3.0/api-ref/rest-api/server/admin/clear-database.mdx b/docs/v3/api-ref/rest-api/server/admin/clear-database.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/admin/clear-database.mdx
rename to docs/v3/api-ref/rest-api/server/admin/clear-database.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/admin/create-database.mdx b/docs/v3/api-ref/rest-api/server/admin/create-database.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/admin/create-database.mdx
rename to docs/v3/api-ref/rest-api/server/admin/create-database.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/admin/drop-database.mdx b/docs/v3/api-ref/rest-api/server/admin/drop-database.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/admin/drop-database.mdx
rename to docs/v3/api-ref/rest-api/server/admin/drop-database.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/admin/read-settings.mdx b/docs/v3/api-ref/rest-api/server/admin/read-settings.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/admin/read-settings.mdx
rename to docs/v3/api-ref/rest-api/server/admin/read-settings.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/admin/read-version.mdx b/docs/v3/api-ref/rest-api/server/admin/read-version.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/admin/read-version.mdx
rename to docs/v3/api-ref/rest-api/server/admin/read-version.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/artifacts/count-artifacts.mdx b/docs/v3/api-ref/rest-api/server/artifacts/count-artifacts.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/artifacts/count-artifacts.mdx
rename to docs/v3/api-ref/rest-api/server/artifacts/count-artifacts.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/artifacts/count-latest-artifacts.mdx b/docs/v3/api-ref/rest-api/server/artifacts/count-latest-artifacts.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/artifacts/count-latest-artifacts.mdx
rename to docs/v3/api-ref/rest-api/server/artifacts/count-latest-artifacts.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/artifacts/create-artifact.mdx b/docs/v3/api-ref/rest-api/server/artifacts/create-artifact.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/artifacts/create-artifact.mdx
rename to docs/v3/api-ref/rest-api/server/artifacts/create-artifact.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/artifacts/delete-artifact.mdx b/docs/v3/api-ref/rest-api/server/artifacts/delete-artifact.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/artifacts/delete-artifact.mdx
rename to docs/v3/api-ref/rest-api/server/artifacts/delete-artifact.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/artifacts/read-artifact.mdx b/docs/v3/api-ref/rest-api/server/artifacts/read-artifact.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/artifacts/read-artifact.mdx
rename to docs/v3/api-ref/rest-api/server/artifacts/read-artifact.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/artifacts/read-artifacts.mdx b/docs/v3/api-ref/rest-api/server/artifacts/read-artifacts.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/artifacts/read-artifacts.mdx
rename to docs/v3/api-ref/rest-api/server/artifacts/read-artifacts.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/artifacts/read-latest-artifact.mdx b/docs/v3/api-ref/rest-api/server/artifacts/read-latest-artifact.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/artifacts/read-latest-artifact.mdx
rename to docs/v3/api-ref/rest-api/server/artifacts/read-latest-artifact.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/artifacts/read-latest-artifacts.mdx b/docs/v3/api-ref/rest-api/server/artifacts/read-latest-artifacts.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/artifacts/read-latest-artifacts.mdx
rename to docs/v3/api-ref/rest-api/server/artifacts/read-latest-artifacts.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/artifacts/update-artifact.mdx b/docs/v3/api-ref/rest-api/server/artifacts/update-artifact.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/artifacts/update-artifact.mdx
rename to docs/v3/api-ref/rest-api/server/artifacts/update-artifact.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/automations/count-automations.mdx b/docs/v3/api-ref/rest-api/server/automations/count-automations.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/automations/count-automations.mdx
rename to docs/v3/api-ref/rest-api/server/automations/count-automations.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/automations/create-automation.mdx b/docs/v3/api-ref/rest-api/server/automations/create-automation.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/automations/create-automation.mdx
rename to docs/v3/api-ref/rest-api/server/automations/create-automation.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/automations/delete-automation.mdx b/docs/v3/api-ref/rest-api/server/automations/delete-automation.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/automations/delete-automation.mdx
rename to docs/v3/api-ref/rest-api/server/automations/delete-automation.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/automations/delete-automations-owned-by-resource.mdx b/docs/v3/api-ref/rest-api/server/automations/delete-automations-owned-by-resource.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/automations/delete-automations-owned-by-resource.mdx
rename to docs/v3/api-ref/rest-api/server/automations/delete-automations-owned-by-resource.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/automations/patch-automation.mdx b/docs/v3/api-ref/rest-api/server/automations/patch-automation.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/automations/patch-automation.mdx
rename to docs/v3/api-ref/rest-api/server/automations/patch-automation.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/automations/read-automation.mdx b/docs/v3/api-ref/rest-api/server/automations/read-automation.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/automations/read-automation.mdx
rename to docs/v3/api-ref/rest-api/server/automations/read-automation.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/automations/read-automations-related-to-resource.mdx b/docs/v3/api-ref/rest-api/server/automations/read-automations-related-to-resource.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/automations/read-automations-related-to-resource.mdx
rename to docs/v3/api-ref/rest-api/server/automations/read-automations-related-to-resource.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/automations/read-automations.mdx b/docs/v3/api-ref/rest-api/server/automations/read-automations.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/automations/read-automations.mdx
rename to docs/v3/api-ref/rest-api/server/automations/read-automations.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/automations/update-automation.mdx b/docs/v3/api-ref/rest-api/server/automations/update-automation.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/automations/update-automation.mdx
rename to docs/v3/api-ref/rest-api/server/automations/update-automation.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/automations/validate-template.mdx b/docs/v3/api-ref/rest-api/server/automations/validate-template.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/automations/validate-template.mdx
rename to docs/v3/api-ref/rest-api/server/automations/validate-template.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/block-capabilities/read-available-block-capabilities.mdx b/docs/v3/api-ref/rest-api/server/block-capabilities/read-available-block-capabilities.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/block-capabilities/read-available-block-capabilities.mdx
rename to docs/v3/api-ref/rest-api/server/block-capabilities/read-available-block-capabilities.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/block-documents/count-block-documents.mdx b/docs/v3/api-ref/rest-api/server/block-documents/count-block-documents.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/block-documents/count-block-documents.mdx
rename to docs/v3/api-ref/rest-api/server/block-documents/count-block-documents.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/block-documents/create-block-document.mdx b/docs/v3/api-ref/rest-api/server/block-documents/create-block-document.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/block-documents/create-block-document.mdx
rename to docs/v3/api-ref/rest-api/server/block-documents/create-block-document.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/block-documents/delete-block-document.mdx b/docs/v3/api-ref/rest-api/server/block-documents/delete-block-document.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/block-documents/delete-block-document.mdx
rename to docs/v3/api-ref/rest-api/server/block-documents/delete-block-document.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/block-documents/read-block-document-by-id.mdx b/docs/v3/api-ref/rest-api/server/block-documents/read-block-document-by-id.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/block-documents/read-block-document-by-id.mdx
rename to docs/v3/api-ref/rest-api/server/block-documents/read-block-document-by-id.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/block-documents/read-block-documents.mdx b/docs/v3/api-ref/rest-api/server/block-documents/read-block-documents.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/block-documents/read-block-documents.mdx
rename to docs/v3/api-ref/rest-api/server/block-documents/read-block-documents.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/block-documents/update-block-document-data.mdx b/docs/v3/api-ref/rest-api/server/block-documents/update-block-document-data.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/block-documents/update-block-document-data.mdx
rename to docs/v3/api-ref/rest-api/server/block-documents/update-block-document-data.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/block-schemas/create-block-schema.mdx b/docs/v3/api-ref/rest-api/server/block-schemas/create-block-schema.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/block-schemas/create-block-schema.mdx
rename to docs/v3/api-ref/rest-api/server/block-schemas/create-block-schema.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/block-schemas/delete-block-schema.mdx b/docs/v3/api-ref/rest-api/server/block-schemas/delete-block-schema.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/block-schemas/delete-block-schema.mdx
rename to docs/v3/api-ref/rest-api/server/block-schemas/delete-block-schema.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/block-schemas/read-block-schema-by-checksum.mdx b/docs/v3/api-ref/rest-api/server/block-schemas/read-block-schema-by-checksum.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/block-schemas/read-block-schema-by-checksum.mdx
rename to docs/v3/api-ref/rest-api/server/block-schemas/read-block-schema-by-checksum.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/block-schemas/read-block-schema-by-id.mdx b/docs/v3/api-ref/rest-api/server/block-schemas/read-block-schema-by-id.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/block-schemas/read-block-schema-by-id.mdx
rename to docs/v3/api-ref/rest-api/server/block-schemas/read-block-schema-by-id.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/block-schemas/read-block-schemas.mdx b/docs/v3/api-ref/rest-api/server/block-schemas/read-block-schemas.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/block-schemas/read-block-schemas.mdx
rename to docs/v3/api-ref/rest-api/server/block-schemas/read-block-schemas.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/block-types/create-block-type.mdx b/docs/v3/api-ref/rest-api/server/block-types/create-block-type.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/block-types/create-block-type.mdx
rename to docs/v3/api-ref/rest-api/server/block-types/create-block-type.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/block-types/delete-block-type.mdx b/docs/v3/api-ref/rest-api/server/block-types/delete-block-type.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/block-types/delete-block-type.mdx
rename to docs/v3/api-ref/rest-api/server/block-types/delete-block-type.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/block-types/install-system-block-types.mdx b/docs/v3/api-ref/rest-api/server/block-types/install-system-block-types.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/block-types/install-system-block-types.mdx
rename to docs/v3/api-ref/rest-api/server/block-types/install-system-block-types.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/block-types/read-block-document-by-name-for-block-type.mdx b/docs/v3/api-ref/rest-api/server/block-types/read-block-document-by-name-for-block-type.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/block-types/read-block-document-by-name-for-block-type.mdx
rename to docs/v3/api-ref/rest-api/server/block-types/read-block-document-by-name-for-block-type.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/block-types/read-block-documents-for-block-type.mdx b/docs/v3/api-ref/rest-api/server/block-types/read-block-documents-for-block-type.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/block-types/read-block-documents-for-block-type.mdx
rename to docs/v3/api-ref/rest-api/server/block-types/read-block-documents-for-block-type.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/block-types/read-block-type-by-id.mdx b/docs/v3/api-ref/rest-api/server/block-types/read-block-type-by-id.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/block-types/read-block-type-by-id.mdx
rename to docs/v3/api-ref/rest-api/server/block-types/read-block-type-by-id.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/block-types/read-block-type-by-slug.mdx b/docs/v3/api-ref/rest-api/server/block-types/read-block-type-by-slug.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/block-types/read-block-type-by-slug.mdx
rename to docs/v3/api-ref/rest-api/server/block-types/read-block-type-by-slug.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/block-types/read-block-types.mdx b/docs/v3/api-ref/rest-api/server/block-types/read-block-types.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/block-types/read-block-types.mdx
rename to docs/v3/api-ref/rest-api/server/block-types/read-block-types.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/block-types/update-block-type.mdx b/docs/v3/api-ref/rest-api/server/block-types/update-block-type.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/block-types/update-block-type.mdx
rename to docs/v3/api-ref/rest-api/server/block-types/update-block-type.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/collections/read-view-content.mdx b/docs/v3/api-ref/rest-api/server/collections/read-view-content.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/collections/read-view-content.mdx
rename to docs/v3/api-ref/rest-api/server/collections/read-view-content.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/concurrency-limits-v2/bulk-decrement-active-slots.mdx b/docs/v3/api-ref/rest-api/server/concurrency-limits-v2/bulk-decrement-active-slots.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/concurrency-limits-v2/bulk-decrement-active-slots.mdx
rename to docs/v3/api-ref/rest-api/server/concurrency-limits-v2/bulk-decrement-active-slots.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/concurrency-limits-v2/bulk-increment-active-slots.mdx b/docs/v3/api-ref/rest-api/server/concurrency-limits-v2/bulk-increment-active-slots.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/concurrency-limits-v2/bulk-increment-active-slots.mdx
rename to docs/v3/api-ref/rest-api/server/concurrency-limits-v2/bulk-increment-active-slots.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/concurrency-limits-v2/create-concurrency-limit-v2.mdx b/docs/v3/api-ref/rest-api/server/concurrency-limits-v2/create-concurrency-limit-v2.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/concurrency-limits-v2/create-concurrency-limit-v2.mdx
rename to docs/v3/api-ref/rest-api/server/concurrency-limits-v2/create-concurrency-limit-v2.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/concurrency-limits-v2/delete-concurrency-limit-v2.mdx b/docs/v3/api-ref/rest-api/server/concurrency-limits-v2/delete-concurrency-limit-v2.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/concurrency-limits-v2/delete-concurrency-limit-v2.mdx
rename to docs/v3/api-ref/rest-api/server/concurrency-limits-v2/delete-concurrency-limit-v2.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/concurrency-limits-v2/read-all-concurrency-limits-v2.mdx b/docs/v3/api-ref/rest-api/server/concurrency-limits-v2/read-all-concurrency-limits-v2.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/concurrency-limits-v2/read-all-concurrency-limits-v2.mdx
rename to docs/v3/api-ref/rest-api/server/concurrency-limits-v2/read-all-concurrency-limits-v2.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/concurrency-limits-v2/read-concurrency-limit-v2.mdx b/docs/v3/api-ref/rest-api/server/concurrency-limits-v2/read-concurrency-limit-v2.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/concurrency-limits-v2/read-concurrency-limit-v2.mdx
rename to docs/v3/api-ref/rest-api/server/concurrency-limits-v2/read-concurrency-limit-v2.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/concurrency-limits-v2/update-concurrency-limit-v2.mdx b/docs/v3/api-ref/rest-api/server/concurrency-limits-v2/update-concurrency-limit-v2.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/concurrency-limits-v2/update-concurrency-limit-v2.mdx
rename to docs/v3/api-ref/rest-api/server/concurrency-limits-v2/update-concurrency-limit-v2.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/concurrency-limits/create-concurrency-limit.mdx b/docs/v3/api-ref/rest-api/server/concurrency-limits/create-concurrency-limit.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/concurrency-limits/create-concurrency-limit.mdx
rename to docs/v3/api-ref/rest-api/server/concurrency-limits/create-concurrency-limit.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/concurrency-limits/decrement-concurrency-limits-v1.mdx b/docs/v3/api-ref/rest-api/server/concurrency-limits/decrement-concurrency-limits-v1.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/concurrency-limits/decrement-concurrency-limits-v1.mdx
rename to docs/v3/api-ref/rest-api/server/concurrency-limits/decrement-concurrency-limits-v1.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/concurrency-limits/delete-concurrency-limit-by-tag.mdx b/docs/v3/api-ref/rest-api/server/concurrency-limits/delete-concurrency-limit-by-tag.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/concurrency-limits/delete-concurrency-limit-by-tag.mdx
rename to docs/v3/api-ref/rest-api/server/concurrency-limits/delete-concurrency-limit-by-tag.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/concurrency-limits/delete-concurrency-limit.mdx b/docs/v3/api-ref/rest-api/server/concurrency-limits/delete-concurrency-limit.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/concurrency-limits/delete-concurrency-limit.mdx
rename to docs/v3/api-ref/rest-api/server/concurrency-limits/delete-concurrency-limit.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/concurrency-limits/increment-concurrency-limits-v1.mdx b/docs/v3/api-ref/rest-api/server/concurrency-limits/increment-concurrency-limits-v1.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/concurrency-limits/increment-concurrency-limits-v1.mdx
rename to docs/v3/api-ref/rest-api/server/concurrency-limits/increment-concurrency-limits-v1.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/concurrency-limits/read-concurrency-limit-by-tag.mdx b/docs/v3/api-ref/rest-api/server/concurrency-limits/read-concurrency-limit-by-tag.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/concurrency-limits/read-concurrency-limit-by-tag.mdx
rename to docs/v3/api-ref/rest-api/server/concurrency-limits/read-concurrency-limit-by-tag.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/concurrency-limits/read-concurrency-limit.mdx b/docs/v3/api-ref/rest-api/server/concurrency-limits/read-concurrency-limit.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/concurrency-limits/read-concurrency-limit.mdx
rename to docs/v3/api-ref/rest-api/server/concurrency-limits/read-concurrency-limit.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/concurrency-limits/read-concurrency-limits.mdx b/docs/v3/api-ref/rest-api/server/concurrency-limits/read-concurrency-limits.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/concurrency-limits/read-concurrency-limits.mdx
rename to docs/v3/api-ref/rest-api/server/concurrency-limits/read-concurrency-limits.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/concurrency-limits/reset-concurrency-limit-by-tag.mdx b/docs/v3/api-ref/rest-api/server/concurrency-limits/reset-concurrency-limit-by-tag.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/concurrency-limits/reset-concurrency-limit-by-tag.mdx
rename to docs/v3/api-ref/rest-api/server/concurrency-limits/reset-concurrency-limit-by-tag.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/create-csrf-token.mdx b/docs/v3/api-ref/rest-api/server/create-csrf-token.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/create-csrf-token.mdx
rename to docs/v3/api-ref/rest-api/server/create-csrf-token.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/deployments/count-deployments.mdx b/docs/v3/api-ref/rest-api/server/deployments/count-deployments.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/deployments/count-deployments.mdx
rename to docs/v3/api-ref/rest-api/server/deployments/count-deployments.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/deployments/create-deployment-schedules.mdx b/docs/v3/api-ref/rest-api/server/deployments/create-deployment-schedules.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/deployments/create-deployment-schedules.mdx
rename to docs/v3/api-ref/rest-api/server/deployments/create-deployment-schedules.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/deployments/create-deployment.mdx b/docs/v3/api-ref/rest-api/server/deployments/create-deployment.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/deployments/create-deployment.mdx
rename to docs/v3/api-ref/rest-api/server/deployments/create-deployment.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/deployments/create-flow-run-from-deployment.mdx b/docs/v3/api-ref/rest-api/server/deployments/create-flow-run-from-deployment.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/deployments/create-flow-run-from-deployment.mdx
rename to docs/v3/api-ref/rest-api/server/deployments/create-flow-run-from-deployment.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/deployments/delete-deployment-schedule.mdx b/docs/v3/api-ref/rest-api/server/deployments/delete-deployment-schedule.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/deployments/delete-deployment-schedule.mdx
rename to docs/v3/api-ref/rest-api/server/deployments/delete-deployment-schedule.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/deployments/delete-deployment.mdx b/docs/v3/api-ref/rest-api/server/deployments/delete-deployment.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/deployments/delete-deployment.mdx
rename to docs/v3/api-ref/rest-api/server/deployments/delete-deployment.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/deployments/get-scheduled-flow-runs-for-deployments.mdx b/docs/v3/api-ref/rest-api/server/deployments/get-scheduled-flow-runs-for-deployments.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/deployments/get-scheduled-flow-runs-for-deployments.mdx
rename to docs/v3/api-ref/rest-api/server/deployments/get-scheduled-flow-runs-for-deployments.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/deployments/paginate-deployments.mdx b/docs/v3/api-ref/rest-api/server/deployments/paginate-deployments.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/deployments/paginate-deployments.mdx
rename to docs/v3/api-ref/rest-api/server/deployments/paginate-deployments.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/deployments/pause-deployment-1.mdx b/docs/v3/api-ref/rest-api/server/deployments/pause-deployment-1.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/deployments/pause-deployment-1.mdx
rename to docs/v3/api-ref/rest-api/server/deployments/pause-deployment-1.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/deployments/pause-deployment.mdx b/docs/v3/api-ref/rest-api/server/deployments/pause-deployment.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/deployments/pause-deployment.mdx
rename to docs/v3/api-ref/rest-api/server/deployments/pause-deployment.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/deployments/read-deployment-by-name.mdx b/docs/v3/api-ref/rest-api/server/deployments/read-deployment-by-name.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/deployments/read-deployment-by-name.mdx
rename to docs/v3/api-ref/rest-api/server/deployments/read-deployment-by-name.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/deployments/read-deployment-schedules.mdx b/docs/v3/api-ref/rest-api/server/deployments/read-deployment-schedules.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/deployments/read-deployment-schedules.mdx
rename to docs/v3/api-ref/rest-api/server/deployments/read-deployment-schedules.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/deployments/read-deployment.mdx b/docs/v3/api-ref/rest-api/server/deployments/read-deployment.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/deployments/read-deployment.mdx
rename to docs/v3/api-ref/rest-api/server/deployments/read-deployment.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/deployments/read-deployments.mdx b/docs/v3/api-ref/rest-api/server/deployments/read-deployments.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/deployments/read-deployments.mdx
rename to docs/v3/api-ref/rest-api/server/deployments/read-deployments.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/deployments/resume-deployment-1.mdx b/docs/v3/api-ref/rest-api/server/deployments/resume-deployment-1.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/deployments/resume-deployment-1.mdx
rename to docs/v3/api-ref/rest-api/server/deployments/resume-deployment-1.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/deployments/resume-deployment.mdx b/docs/v3/api-ref/rest-api/server/deployments/resume-deployment.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/deployments/resume-deployment.mdx
rename to docs/v3/api-ref/rest-api/server/deployments/resume-deployment.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/deployments/schedule-deployment.mdx b/docs/v3/api-ref/rest-api/server/deployments/schedule-deployment.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/deployments/schedule-deployment.mdx
rename to docs/v3/api-ref/rest-api/server/deployments/schedule-deployment.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/deployments/update-deployment-schedule.mdx b/docs/v3/api-ref/rest-api/server/deployments/update-deployment-schedule.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/deployments/update-deployment-schedule.mdx
rename to docs/v3/api-ref/rest-api/server/deployments/update-deployment-schedule.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/deployments/update-deployment.mdx b/docs/v3/api-ref/rest-api/server/deployments/update-deployment.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/deployments/update-deployment.mdx
rename to docs/v3/api-ref/rest-api/server/deployments/update-deployment.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/deployments/work-queue-check-for-deployment.mdx b/docs/v3/api-ref/rest-api/server/deployments/work-queue-check-for-deployment.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/deployments/work-queue-check-for-deployment.mdx
rename to docs/v3/api-ref/rest-api/server/deployments/work-queue-check-for-deployment.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/events/count-account-events.mdx b/docs/v3/api-ref/rest-api/server/events/count-account-events.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/events/count-account-events.mdx
rename to docs/v3/api-ref/rest-api/server/events/count-account-events.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/events/create-events.mdx b/docs/v3/api-ref/rest-api/server/events/create-events.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/events/create-events.mdx
rename to docs/v3/api-ref/rest-api/server/events/create-events.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/events/read-account-events-page.mdx b/docs/v3/api-ref/rest-api/server/events/read-account-events-page.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/events/read-account-events-page.mdx
rename to docs/v3/api-ref/rest-api/server/events/read-account-events-page.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/events/read-events.mdx b/docs/v3/api-ref/rest-api/server/events/read-events.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/events/read-events.mdx
rename to docs/v3/api-ref/rest-api/server/events/read-events.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/flow-run-notification-policies/create-flow-run-notification-policy.mdx b/docs/v3/api-ref/rest-api/server/flow-run-notification-policies/create-flow-run-notification-policy.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/flow-run-notification-policies/create-flow-run-notification-policy.mdx
rename to docs/v3/api-ref/rest-api/server/flow-run-notification-policies/create-flow-run-notification-policy.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/flow-run-notification-policies/delete-flow-run-notification-policy.mdx b/docs/v3/api-ref/rest-api/server/flow-run-notification-policies/delete-flow-run-notification-policy.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/flow-run-notification-policies/delete-flow-run-notification-policy.mdx
rename to docs/v3/api-ref/rest-api/server/flow-run-notification-policies/delete-flow-run-notification-policy.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/flow-run-notification-policies/read-flow-run-notification-policies.mdx b/docs/v3/api-ref/rest-api/server/flow-run-notification-policies/read-flow-run-notification-policies.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/flow-run-notification-policies/read-flow-run-notification-policies.mdx
rename to docs/v3/api-ref/rest-api/server/flow-run-notification-policies/read-flow-run-notification-policies.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/flow-run-notification-policies/read-flow-run-notification-policy.mdx b/docs/v3/api-ref/rest-api/server/flow-run-notification-policies/read-flow-run-notification-policy.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/flow-run-notification-policies/read-flow-run-notification-policy.mdx
rename to docs/v3/api-ref/rest-api/server/flow-run-notification-policies/read-flow-run-notification-policy.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/flow-run-notification-policies/update-flow-run-notification-policy.mdx b/docs/v3/api-ref/rest-api/server/flow-run-notification-policies/update-flow-run-notification-policy.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/flow-run-notification-policies/update-flow-run-notification-policy.mdx
rename to docs/v3/api-ref/rest-api/server/flow-run-notification-policies/update-flow-run-notification-policy.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/flow-run-states/read-flow-run-state.mdx b/docs/v3/api-ref/rest-api/server/flow-run-states/read-flow-run-state.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/flow-run-states/read-flow-run-state.mdx
rename to docs/v3/api-ref/rest-api/server/flow-run-states/read-flow-run-state.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/flow-run-states/read-flow-run-states.mdx b/docs/v3/api-ref/rest-api/server/flow-run-states/read-flow-run-states.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/flow-run-states/read-flow-run-states.mdx
rename to docs/v3/api-ref/rest-api/server/flow-run-states/read-flow-run-states.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/flow-runs/average-flow-run-lateness.mdx b/docs/v3/api-ref/rest-api/server/flow-runs/average-flow-run-lateness.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/flow-runs/average-flow-run-lateness.mdx
rename to docs/v3/api-ref/rest-api/server/flow-runs/average-flow-run-lateness.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/flow-runs/count-flow-runs.mdx b/docs/v3/api-ref/rest-api/server/flow-runs/count-flow-runs.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/flow-runs/count-flow-runs.mdx
rename to docs/v3/api-ref/rest-api/server/flow-runs/count-flow-runs.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/flow-runs/count-task-runs-by-flow-run.mdx b/docs/v3/api-ref/rest-api/server/flow-runs/count-task-runs-by-flow-run.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/flow-runs/count-task-runs-by-flow-run.mdx
rename to docs/v3/api-ref/rest-api/server/flow-runs/count-task-runs-by-flow-run.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/flow-runs/create-flow-run-input.mdx b/docs/v3/api-ref/rest-api/server/flow-runs/create-flow-run-input.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/flow-runs/create-flow-run-input.mdx
rename to docs/v3/api-ref/rest-api/server/flow-runs/create-flow-run-input.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/flow-runs/create-flow-run.mdx b/docs/v3/api-ref/rest-api/server/flow-runs/create-flow-run.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/flow-runs/create-flow-run.mdx
rename to docs/v3/api-ref/rest-api/server/flow-runs/create-flow-run.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/flow-runs/delete-flow-run-input.mdx b/docs/v3/api-ref/rest-api/server/flow-runs/delete-flow-run-input.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/flow-runs/delete-flow-run-input.mdx
rename to docs/v3/api-ref/rest-api/server/flow-runs/delete-flow-run-input.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/flow-runs/delete-flow-run.mdx b/docs/v3/api-ref/rest-api/server/flow-runs/delete-flow-run.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/flow-runs/delete-flow-run.mdx
rename to docs/v3/api-ref/rest-api/server/flow-runs/delete-flow-run.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/flow-runs/download-logs.mdx b/docs/v3/api-ref/rest-api/server/flow-runs/download-logs.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/flow-runs/download-logs.mdx
rename to docs/v3/api-ref/rest-api/server/flow-runs/download-logs.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/flow-runs/filter-flow-run-input.mdx b/docs/v3/api-ref/rest-api/server/flow-runs/filter-flow-run-input.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/flow-runs/filter-flow-run-input.mdx
rename to docs/v3/api-ref/rest-api/server/flow-runs/filter-flow-run-input.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/flow-runs/flow-run-history.mdx b/docs/v3/api-ref/rest-api/server/flow-runs/flow-run-history.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/flow-runs/flow-run-history.mdx
rename to docs/v3/api-ref/rest-api/server/flow-runs/flow-run-history.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/flow-runs/paginate-flow-runs.mdx b/docs/v3/api-ref/rest-api/server/flow-runs/paginate-flow-runs.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/flow-runs/paginate-flow-runs.mdx
rename to docs/v3/api-ref/rest-api/server/flow-runs/paginate-flow-runs.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/flow-runs/read-flow-run-graph-v1.mdx b/docs/v3/api-ref/rest-api/server/flow-runs/read-flow-run-graph-v1.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/flow-runs/read-flow-run-graph-v1.mdx
rename to docs/v3/api-ref/rest-api/server/flow-runs/read-flow-run-graph-v1.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/flow-runs/read-flow-run-graph-v2.mdx b/docs/v3/api-ref/rest-api/server/flow-runs/read-flow-run-graph-v2.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/flow-runs/read-flow-run-graph-v2.mdx
rename to docs/v3/api-ref/rest-api/server/flow-runs/read-flow-run-graph-v2.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/flow-runs/read-flow-run-history.mdx b/docs/v3/api-ref/rest-api/server/flow-runs/read-flow-run-history.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/flow-runs/read-flow-run-history.mdx
rename to docs/v3/api-ref/rest-api/server/flow-runs/read-flow-run-history.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/flow-runs/read-flow-run-input.mdx b/docs/v3/api-ref/rest-api/server/flow-runs/read-flow-run-input.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/flow-runs/read-flow-run-input.mdx
rename to docs/v3/api-ref/rest-api/server/flow-runs/read-flow-run-input.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/flow-runs/read-flow-run.mdx b/docs/v3/api-ref/rest-api/server/flow-runs/read-flow-run.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/flow-runs/read-flow-run.mdx
rename to docs/v3/api-ref/rest-api/server/flow-runs/read-flow-run.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/flow-runs/read-flow-runs.mdx b/docs/v3/api-ref/rest-api/server/flow-runs/read-flow-runs.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/flow-runs/read-flow-runs.mdx
rename to docs/v3/api-ref/rest-api/server/flow-runs/read-flow-runs.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/flow-runs/resume-flow-run.mdx b/docs/v3/api-ref/rest-api/server/flow-runs/resume-flow-run.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/flow-runs/resume-flow-run.mdx
rename to docs/v3/api-ref/rest-api/server/flow-runs/resume-flow-run.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/flow-runs/set-flow-run-state.mdx b/docs/v3/api-ref/rest-api/server/flow-runs/set-flow-run-state.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/flow-runs/set-flow-run-state.mdx
rename to docs/v3/api-ref/rest-api/server/flow-runs/set-flow-run-state.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/flow-runs/update-flow-run.mdx b/docs/v3/api-ref/rest-api/server/flow-runs/update-flow-run.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/flow-runs/update-flow-run.mdx
rename to docs/v3/api-ref/rest-api/server/flow-runs/update-flow-run.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/flows/count-deployments-by-flow.mdx b/docs/v3/api-ref/rest-api/server/flows/count-deployments-by-flow.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/flows/count-deployments-by-flow.mdx
rename to docs/v3/api-ref/rest-api/server/flows/count-deployments-by-flow.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/flows/count-flows.mdx b/docs/v3/api-ref/rest-api/server/flows/count-flows.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/flows/count-flows.mdx
rename to docs/v3/api-ref/rest-api/server/flows/count-flows.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/flows/create-flow.mdx b/docs/v3/api-ref/rest-api/server/flows/create-flow.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/flows/create-flow.mdx
rename to docs/v3/api-ref/rest-api/server/flows/create-flow.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/flows/delete-flow.mdx b/docs/v3/api-ref/rest-api/server/flows/delete-flow.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/flows/delete-flow.mdx
rename to docs/v3/api-ref/rest-api/server/flows/delete-flow.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/flows/next-runs-by-flow.mdx b/docs/v3/api-ref/rest-api/server/flows/next-runs-by-flow.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/flows/next-runs-by-flow.mdx
rename to docs/v3/api-ref/rest-api/server/flows/next-runs-by-flow.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/flows/paginate-flows.mdx b/docs/v3/api-ref/rest-api/server/flows/paginate-flows.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/flows/paginate-flows.mdx
rename to docs/v3/api-ref/rest-api/server/flows/paginate-flows.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/flows/read-flow-by-name.mdx b/docs/v3/api-ref/rest-api/server/flows/read-flow-by-name.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/flows/read-flow-by-name.mdx
rename to docs/v3/api-ref/rest-api/server/flows/read-flow-by-name.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/flows/read-flow.mdx b/docs/v3/api-ref/rest-api/server/flows/read-flow.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/flows/read-flow.mdx
rename to docs/v3/api-ref/rest-api/server/flows/read-flow.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/flows/read-flows.mdx b/docs/v3/api-ref/rest-api/server/flows/read-flows.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/flows/read-flows.mdx
rename to docs/v3/api-ref/rest-api/server/flows/read-flows.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/flows/update-flow.mdx b/docs/v3/api-ref/rest-api/server/flows/update-flow.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/flows/update-flow.mdx
rename to docs/v3/api-ref/rest-api/server/flows/update-flow.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/index.mdx b/docs/v3/api-ref/rest-api/server/index.mdx
similarity index 79%
rename from docs/3.0/api-ref/rest-api/server/index.mdx
rename to docs/v3/api-ref/rest-api/server/index.mdx
index a653cce0f770..2e1add7033bd 100644
--- a/docs/3.0/api-ref/rest-api/server/index.mdx
+++ b/docs/v3/api-ref/rest-api/server/index.mdx
@@ -6,4 +6,4 @@ description: The Prefect server API enables you to interact programmatically wit
The self-hosted Prefect server API is organized around REST.
Select links in the left navigation menu to explore.
-Learn about [self-hosting Prefect server](/3.0/manage/self-host/).
\ No newline at end of file
+Learn about [self-hosting Prefect server](/v3/manage/self-host/).
\ No newline at end of file
diff --git a/docs/3.0/api-ref/rest-api/server/logs/create-logs.mdx b/docs/v3/api-ref/rest-api/server/logs/create-logs.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/logs/create-logs.mdx
rename to docs/v3/api-ref/rest-api/server/logs/create-logs.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/logs/read-logs.mdx b/docs/v3/api-ref/rest-api/server/logs/read-logs.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/logs/read-logs.mdx
rename to docs/v3/api-ref/rest-api/server/logs/read-logs.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/root/health-check.mdx b/docs/v3/api-ref/rest-api/server/root/health-check.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/root/health-check.mdx
rename to docs/v3/api-ref/rest-api/server/root/health-check.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/root/hello.mdx b/docs/v3/api-ref/rest-api/server/root/hello.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/root/hello.mdx
rename to docs/v3/api-ref/rest-api/server/root/hello.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/root/perform-readiness-check.mdx b/docs/v3/api-ref/rest-api/server/root/perform-readiness-check.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/root/perform-readiness-check.mdx
rename to docs/v3/api-ref/rest-api/server/root/perform-readiness-check.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/root/server-version.mdx b/docs/v3/api-ref/rest-api/server/root/server-version.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/root/server-version.mdx
rename to docs/v3/api-ref/rest-api/server/root/server-version.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/savedsearches/create-saved-search.mdx b/docs/v3/api-ref/rest-api/server/savedsearches/create-saved-search.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/savedsearches/create-saved-search.mdx
rename to docs/v3/api-ref/rest-api/server/savedsearches/create-saved-search.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/savedsearches/delete-saved-search.mdx b/docs/v3/api-ref/rest-api/server/savedsearches/delete-saved-search.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/savedsearches/delete-saved-search.mdx
rename to docs/v3/api-ref/rest-api/server/savedsearches/delete-saved-search.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/savedsearches/read-saved-search.mdx b/docs/v3/api-ref/rest-api/server/savedsearches/read-saved-search.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/savedsearches/read-saved-search.mdx
rename to docs/v3/api-ref/rest-api/server/savedsearches/read-saved-search.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/savedsearches/read-saved-searches.mdx b/docs/v3/api-ref/rest-api/server/savedsearches/read-saved-searches.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/savedsearches/read-saved-searches.mdx
rename to docs/v3/api-ref/rest-api/server/savedsearches/read-saved-searches.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/schema.json b/docs/v3/api-ref/rest-api/server/schema.json
similarity index 99%
rename from docs/3.0/api-ref/rest-api/server/schema.json
rename to docs/v3/api-ref/rest-api/server/schema.json
index 749477368771..56d863334d11 100644
--- a/docs/3.0/api-ref/rest-api/server/schema.json
+++ b/docs/v3/api-ref/rest-api/server/schema.json
@@ -2,7 +2,7 @@
"openapi": "3.1.0",
"info": {
"title": "Prefect Prefect REST API",
- "version": "3.0",
+ "version": "v3",
"x-logo": {
"url": "static/prefect-logo-mark-gradient.png"
}
@@ -9049,6 +9049,9 @@
"additionalProperties": {
"type": "integer"
},
+ "propertyNames": {
+ "format": "uuid"
+ },
"title": "Response Count Deployments By Flow Ui Flows Count Deployments Post"
}
}
@@ -9114,6 +9117,9 @@
}
]
},
+ "propertyNames": {
+ "format": "uuid"
+ },
"title": "Response Next Runs By Flow Ui Flows Next Runs Post"
}
}
@@ -9228,6 +9234,9 @@
"additionalProperties": {
"type": "integer"
},
+ "propertyNames": {
+ "format": "uuid"
+ },
"title": "Response Count Task Runs By Flow Run Ui Flow Runs Count Task Runs Post"
}
}
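The three hunks above add a `propertyNames` constraint so that the keys of these count/next-run response objects must be UUID strings. A minimal sketch of what that constraint enforces, using the third-party `jsonschema` package (the package choice is an illustration, not part of this change; note that `format` is only asserted when a format checker is supplied):

```python
from jsonschema import Draft202012Validator, FormatChecker

schema = {
    "type": "object",
    "additionalProperties": {"type": "integer"},
    "propertyNames": {"format": "uuid"},  # every key must parse as a UUID
}

validator = Draft202012Validator(schema, format_checker=FormatChecker())

# Valid: the key is a UUID string and the value is an integer.
validator.validate({"8d4a4c3e-1f2b-4b9a-9c3d-2a1b3c4d5e6f": 3})

# Invalid: "not-a-uuid" fails the propertyNames format check.
errors = list(validator.iter_errors({"not-a-uuid": 3}))
assert errors, "expected a validation error for a non-UUID key"
```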
@@ -9804,7 +9813,7 @@
"type": "null"
}
],
- "title": "Metadata ",
+ "title": "Metadata",
"description": "User-defined artifact metadata. Content must be string key and value pairs."
},
"flow_run_id": {
@@ -9928,7 +9937,7 @@
"type": "null"
}
],
- "title": "Metadata ",
+ "title": "Metadata",
"description": "User-defined artifact metadata. Content must be string key and value pairs."
},
"flow_run_id": {
@@ -10048,7 +10057,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of flow run IDs to include"
}
},
@@ -10071,7 +10080,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of artifact keys to include"
},
"like_": {
@@ -10083,7 +10092,7 @@
"type": "null"
}
],
- "title": "Like ",
+ "title": "Like",
"description": "A string to match artifact keys against. This can include SQL wildcard characters like `%` and `_`.",
"examples": [
"my-artifact-%"
@@ -10098,7 +10107,7 @@
"type": "null"
}
],
- "title": "Exists ",
+ "title": "Exists",
"description": "If `true`, only include artifacts with a non-null key. If `false`, only include artifacts with a null key. Should return all rows in the ArtifactCollection table if specified."
}
},
@@ -10122,7 +10131,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of artifact ids to include"
}
},
@@ -10146,7 +10155,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of task run IDs to include"
}
},
@@ -10169,7 +10178,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of artifact types to include"
},
"not_any_": {
@@ -10184,7 +10193,7 @@
"type": "null"
}
],
- "title": "Not Any ",
+ "title": "Not Any",
"description": "A list of artifact types to exclude"
}
},
@@ -10268,7 +10277,7 @@
"type": "null"
}
],
- "title": "Metadata ",
+ "title": "Metadata",
"description": "User-defined artifact metadata. Content must be string key and value pairs."
},
"flow_run_id": {
@@ -10386,7 +10395,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of flow run IDs to include"
}
},
@@ -10410,7 +10419,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of artifact ids to include"
}
},
@@ -10433,7 +10442,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of artifact keys to include"
},
"like_": {
@@ -10445,7 +10454,7 @@
"type": "null"
}
],
- "title": "Like ",
+ "title": "Like",
"description": "A string to match artifact keys against. This can include SQL wildcard characters like `%` and `_`.",
"examples": [
"my-artifact-%"
@@ -10460,7 +10469,7 @@
"type": "null"
}
],
- "title": "Exists ",
+ "title": "Exists",
"description": "If `true`, only include artifacts with a non-null key. If `false`, only include artifacts with a null key."
}
},
@@ -10484,7 +10493,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of task run IDs to include"
}
},
@@ -10507,7 +10516,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of artifact types to include"
},
"not_any_": {
@@ -10522,7 +10531,7 @@
"type": "null"
}
],
- "title": "Not Any ",
+ "title": "Not Any",
"description": "A list of artifact types to exclude"
}
},
@@ -10580,7 +10589,7 @@
"type": "null"
}
],
- "title": "Metadata "
+ "title": "Metadata"
}
},
"additionalProperties": false,
@@ -11104,7 +11113,7 @@
"type": "null"
}
],
- "title": "Before ",
+ "title": "Before",
"description": "Only include automations created before this datetime"
}
},
@@ -11127,7 +11136,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "Only include automations with names that match any of these strings"
}
},
@@ -11615,7 +11624,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of block type ids to include"
}
},
@@ -11639,7 +11648,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of block ids to include"
}
},
@@ -11659,7 +11668,7 @@
"type": "null"
}
],
- "title": "Eq ",
+ "title": "Eq",
"description": "Filter block documents for only those that are or are not anonymous."
}
},
@@ -11682,7 +11691,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of block names to include"
},
"like_": {
@@ -11694,7 +11703,7 @@
"type": "null"
}
],
- "title": "Like ",
+ "title": "Like",
"description": "A string to match block names against. This can include SQL wildcard characters like `%` and `_`.",
"examples": [
"my-block%"
@@ -11943,7 +11952,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of block type ids to include"
}
},
@@ -11966,7 +11975,7 @@
"type": "null"
}
],
- "title": "All ",
+ "title": "All",
"description": "A list of block capabilities. Block entities will be returned only if an associated block schema has a superset of the defined capabilities.",
"examples": [
[
@@ -11996,7 +12005,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of IDs to include"
}
},
@@ -12019,7 +12028,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of block schema versions.",
"examples": [
[
@@ -12251,7 +12260,7 @@
"type": "null"
}
],
- "title": "Like ",
+ "title": "Like",
"description": "A case-insensitive partial match. For example, passing 'marvin' will match 'marvin', 'sad-Marvin', and 'marvin-robot'.",
"examples": [
"marvin"
@@ -12277,7 +12286,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of slugs to match"
}
},
@@ -14289,9 +14298,6 @@
"properties": {
"type": {
"type": "string",
- "enum": [
- "call-webhook"
- ],
"const": "call-webhook",
"title": "Type",
"default": "call-webhook"
@@ -14320,9 +14326,6 @@
"properties": {
"type": {
"type": "string",
- "enum": [
- "cancel-flow-run"
- ],
"const": "cancel-flow-run",
"title": "Type",
"default": "cancel-flow-run"
@@ -14336,9 +14339,6 @@
"properties": {
"type": {
"type": "string",
- "enum": [
- "change-flow-run-state"
- ],
"const": "change-flow-run-state",
"title": "Type",
"default": "change-flow-run-state"
@@ -14383,9 +14383,6 @@
"properties": {
"type": {
"type": "string",
- "enum": [
- "compound"
- ],
"const": "compound",
"title": "Type",
"default": "compound"
@@ -14453,9 +14450,6 @@
"properties": {
"type": {
"type": "string",
- "enum": [
- "compound"
- ],
"const": "compound",
"title": "Type",
"default": "compound"
@@ -14837,9 +14831,6 @@
"properties": {
"input_type": {
"type": "string",
- "enum": [
- "constant"
- ],
"const": "constant",
"title": "Input Type",
"default": "constant"
@@ -15476,7 +15467,7 @@
"type": "null"
}
],
- "title": "Ge ",
+ "title": "Ge",
"description": "Only include deployments with a concurrency limit greater than or equal to this value"
},
"le_": {
@@ -15488,7 +15479,7 @@
"type": "null"
}
],
- "title": "Le ",
+ "title": "Le",
"description": "Only include deployments with a concurrency limit less than or equal to this value"
},
"is_null_": {
@@ -15500,7 +15491,7 @@
"type": "null"
}
],
- "title": "Is Null ",
+ "title": "Is Null",
"description": "If true, only include deployments without a concurrency limit"
}
},
@@ -15524,7 +15515,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of deployment ids to include"
}
},
@@ -15547,7 +15538,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of deployment names to include",
"examples": [
[
@@ -15565,7 +15556,7 @@
"type": "null"
}
],
- "title": "Like ",
+ "title": "Like",
"description": "A case-insensitive partial match. For example, passing 'marvin' will match 'marvin', 'sad-Marvin', and 'marvin-robot'.",
"examples": [
"marvin"
@@ -15588,7 +15579,7 @@
"type": "null"
}
],
- "title": "Eq ",
+ "title": "Eq",
"description": "Only returns where deployment is/is not paused"
}
},
@@ -15616,7 +15607,7 @@
"type": "null"
}
],
- "title": "All ",
+ "title": "All",
"description": "A list of tags. Deployments will be returned only if their tags are a superset of the list",
"examples": [
[
@@ -15634,7 +15625,7 @@
"type": "null"
}
],
- "title": "Is Null ",
+ "title": "Is Null",
"description": "If true, only include deployments without tags"
}
},
@@ -15657,7 +15648,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of work queue names to include",
"examples": [
[
@@ -15806,7 +15797,7 @@
"type": "null"
}
],
- "title": "Like ",
+ "title": "Like",
"description": "A case-insensitive partial match on deployment or flow names. For example, passing 'example' might match deployments or flows with 'example' in their names."
}
},
@@ -16545,9 +16536,6 @@
"properties": {
"type": {
"type": "string",
- "enum": [
- "do-nothing"
- ],
"const": "do-nothing",
"title": "Type",
"default": "do-nothing"
@@ -16919,9 +16907,7 @@
"next_page": {
"anyOf": [
{
- "type": "string",
- "minLength": 1,
- "format": "uri"
+ "type": "string"
},
{
"type": "null"
@@ -17071,9 +17057,6 @@
"properties": {
"type": {
"type": "string",
- "enum": [
- "event"
- ],
"const": "event",
"title": "Type",
"default": "event"
@@ -17317,7 +17300,7 @@
"type": "null"
}
],
- "title": "Is Null ",
+ "title": "Is Null",
"description": "If true, only include flows without deployments"
}
},
@@ -17341,7 +17324,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of flow ids to include"
}
},
@@ -17364,7 +17347,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of flow names to include",
"examples": [
[
@@ -17382,7 +17365,7 @@
"type": "null"
}
],
- "title": "Like ",
+ "title": "Like",
"description": "A case-insensitive partial match. For example, passing 'marvin' will match 'marvin', 'sad-Marvin', and 'marvin-robot'.",
"examples": [
"marvin"
@@ -17413,7 +17396,7 @@
"type": "null"
}
],
- "title": "All ",
+ "title": "All",
"description": "A list of tags. Flows will be returned only if their tags are a superset of the list",
"examples": [
[
@@ -17431,7 +17414,7 @@
"type": "null"
}
],
- "title": "Is Null ",
+ "title": "Is Null",
"description": "If true, only include flows without tags"
}
},
@@ -18143,7 +18126,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of flow run deployment ids to include"
},
"is_null_": {
@@ -18155,7 +18138,7 @@
"type": "null"
}
],
- "title": "Is Null ",
+ "title": "Is Null",
"description": "If true, only include flow runs without deployment ids"
}
},
@@ -18176,7 +18159,7 @@
"type": "null"
}
],
- "title": "Before ",
+ "title": "Before",
"description": "Only include flow runs ending at or before this time"
},
"after_": {
@@ -18189,7 +18172,7 @@
"type": "null"
}
],
- "title": "After ",
+ "title": "After",
"description": "Only include flow runs ending at or after this time"
},
"is_null_": {
@@ -18201,7 +18184,7 @@
"type": "null"
}
],
- "title": "Is Null ",
+ "title": "Is Null",
"description": "If true, only return flow runs without an end time"
}
},
@@ -18222,7 +18205,7 @@
"type": "null"
}
],
- "title": "Before ",
+ "title": "Before",
"description": "Only include flow runs scheduled to start at or before this time"
},
"after_": {
@@ -18235,7 +18218,7 @@
"type": "null"
}
],
- "title": "After ",
+ "title": "After",
"description": "Only include flow runs scheduled to start at or after this time"
}
},
@@ -18258,7 +18241,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of flow run flow_versions to include"
}
},
@@ -18282,7 +18265,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of flow run ids to include"
},
"not_any_": {
@@ -18298,7 +18281,7 @@
"type": "null"
}
],
- "title": "Not Any ",
+ "title": "Not Any",
"description": "A list of flow run ids to exclude"
}
},
@@ -18321,7 +18304,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of flow run idempotency keys to include"
},
"not_any_": {
@@ -18336,7 +18319,7 @@
"type": "null"
}
],
- "title": "Not Any ",
+ "title": "Not Any",
"description": "A list of flow run idempotency keys to exclude"
}
},
@@ -18359,7 +18342,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of flow run names to include",
"examples": [
[
@@ -18377,7 +18360,7 @@
"type": "null"
}
],
- "title": "Like ",
+ "title": "Like",
"description": "A case-insensitive partial match. For example, passing 'marvin' will match 'marvin', 'sad-Marvin', and 'marvin-robot'.",
"examples": [
"marvin"
@@ -18401,7 +18384,7 @@
"type": "null"
}
],
- "title": "Before ",
+ "title": "Before",
"description": "Only include flow runs with a next_scheduled_start_time or before this time"
},
"after_": {
@@ -18414,7 +18397,7 @@
"type": "null"
}
],
- "title": "After ",
+ "title": "After",
"description": "Only include flow runs with a next_scheduled_start_time at or after this time"
}
},
@@ -18443,7 +18426,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of parent flow run ids to include"
}
},
@@ -18472,7 +18455,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of flow run parent_task_run_ids to include"
},
"is_null_": {
@@ -18484,7 +18467,7 @@
"type": "null"
}
],
- "title": "Is Null ",
+ "title": "Is Null",
"description": "If true, only include flow runs without parent_task_run_id"
}
},
@@ -18505,7 +18488,7 @@
"type": "null"
}
],
- "title": "Before ",
+ "title": "Before",
"description": "Only include flow runs starting at or before this time"
},
"after_": {
@@ -18518,7 +18501,7 @@
"type": "null"
}
],
- "title": "After ",
+ "title": "After",
"description": "Only include flow runs starting at or after this time"
},
"is_null_": {
@@ -18530,7 +18513,7 @@
"type": "null"
}
],
- "title": "Is Null ",
+ "title": "Is Null",
"description": "If true, only return flow runs without a start time"
}
},
@@ -18588,7 +18571,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of flow run state names to include"
},
"not_any_": {
@@ -18603,7 +18586,7 @@
"type": "null"
}
],
- "title": "Not Any ",
+ "title": "Not Any",
"description": "A list of flow run state names to exclude"
}
},
@@ -18626,7 +18609,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of flow run state types to include"
},
"not_any_": {
@@ -18641,7 +18624,7 @@
"type": "null"
}
],
- "title": "Not Any ",
+ "title": "Not Any",
"description": "A list of flow run state types to exclude"
}
},
@@ -18669,7 +18652,7 @@
"type": "null"
}
],
- "title": "All ",
+ "title": "All",
"description": "A list of tags. Flow runs will be returned only if their tags are a superset of the list",
"examples": [
[
@@ -18687,7 +18670,7 @@
"type": "null"
}
],
- "title": "Is Null ",
+ "title": "Is Null",
"description": "If true, only include flow runs without tags"
}
},
@@ -18715,7 +18698,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of work queue names to include",
"examples": [
[
@@ -18733,7 +18716,7 @@
"type": "null"
}
],
- "title": "Is Null ",
+ "title": "Is Null",
"description": "If true, only include flow runs without work queue names"
}
},
@@ -18983,7 +18966,7 @@
"type": "null"
}
],
- "title": "Eq ",
+ "title": "Eq",
"description": "Filter notification policies for only those that are or are not active."
}
},
@@ -19164,6 +19147,22 @@
"title": "Resuming",
"description": "Indicates if this run is resuming from a pause.",
"default": false
+ },
+ "retry_type": {
+ "anyOf": [
+ {
+ "type": "string",
+ "enum": [
+ "in_process",
+ "reschedule"
+ ]
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "title": "Retry Type",
+ "description": "The type of retry this run is undergoing."
}
},
"type": "object",
@@ -20227,7 +20226,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of flow run IDs to include"
}
},
@@ -20247,7 +20246,7 @@
"type": "null"
}
],
- "title": "Ge ",
+ "title": "Ge",
"description": "Include logs with a level greater than or equal to this level",
"examples": [
20
@@ -20262,7 +20261,7 @@
"type": "null"
}
],
- "title": "Le ",
+ "title": "Le",
"description": "Include logs with a level less than or equal to this level",
"examples": [
50
@@ -20289,7 +20288,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of task run IDs to include"
},
"is_null_": {
@@ -20301,7 +20300,7 @@
"type": "null"
}
],
- "title": "Is Null ",
+ "title": "Is Null",
"description": "If true, only include logs without a task run id"
}
},
@@ -20322,7 +20321,7 @@
"type": "null"
}
],
- "title": "Before ",
+ "title": "Before",
"description": "Only include logs with a timestamp at or before this time"
},
"after_": {
@@ -20335,7 +20334,7 @@
"type": "null"
}
],
- "title": "After ",
+ "title": "After",
"description": "Only include logs with a timestamp at or after this time"
}
},
@@ -20515,9 +20514,6 @@
"properties": {
"input_type": {
"type": "string",
- "enum": [
- "parameter"
- ],
"const": "parameter",
"title": "Input Type",
"default": "parameter"
@@ -20538,9 +20534,6 @@
"properties": {
"type": {
"type": "string",
- "enum": [
- "pause-automation"
- ],
"const": "pause-automation",
"title": "Type",
"default": "pause-automation"
@@ -20577,9 +20570,6 @@
"properties": {
"type": {
"type": "string",
- "enum": [
- "pause-deployment"
- ],
"const": "pause-deployment",
"title": "Type",
"default": "pause-deployment"
@@ -20616,9 +20606,6 @@
"properties": {
"type": {
"type": "string",
- "enum": [
- "pause-work-pool"
- ],
"const": "pause-work-pool",
"title": "Type",
"default": "pause-work-pool"
@@ -20655,9 +20642,6 @@
"properties": {
"type": {
"type": "string",
- "enum": [
- "pause-work-queue"
- ],
"const": "pause-work-queue",
"title": "Type",
"default": "pause-work-queue"
@@ -20863,9 +20847,6 @@
"properties": {
"type": {
"type": "string",
- "enum": [
- "resume-automation"
- ],
"const": "resume-automation",
"title": "Type",
"default": "resume-automation"
@@ -20902,9 +20883,6 @@
"properties": {
"type": {
"type": "string",
- "enum": [
- "resume-deployment"
- ],
"const": "resume-deployment",
"title": "Type",
"default": "resume-deployment"
@@ -20941,9 +20919,6 @@
"properties": {
"type": {
"type": "string",
- "enum": [
- "resume-flow-run"
- ],
"const": "resume-flow-run",
"title": "Type",
"default": "resume-flow-run"
@@ -20957,9 +20932,6 @@
"properties": {
"type": {
"type": "string",
- "enum": [
- "resume-work-pool"
- ],
"const": "resume-work-pool",
"title": "Type",
"default": "resume-work-pool"
@@ -20996,9 +20968,6 @@
"properties": {
"type": {
"type": "string",
- "enum": [
- "resume-work-queue"
- ],
"const": "resume-work-queue",
"title": "Type",
"default": "resume-work-queue"
@@ -21035,9 +21004,6 @@
"properties": {
"type": {
"type": "string",
- "enum": [
- "run-deployment"
- ],
"const": "run-deployment",
"title": "Type",
"default": "run-deployment"
@@ -21212,9 +21178,6 @@
"properties": {
"type": {
"type": "string",
- "enum": [
- "send-notification"
- ],
"const": "send-notification",
"title": "Type",
"default": "send-notification"
@@ -21248,9 +21211,6 @@
"properties": {
"type": {
"type": "string",
- "enum": [
- "sequence"
- ],
"const": "sequence",
"title": "Type",
"default": "sequence"
@@ -21302,9 +21262,6 @@
"properties": {
"type": {
"type": "string",
- "enum": [
- "sequence"
- ],
"const": "sequence",
"title": "Type",
"default": "sequence"
@@ -21363,10 +21320,7 @@
"title": "SetStateStatus",
"description": "Enumerates return statuses for setting run states."
},
- "Settings": {
- "title": "Settings",
- "description": "Settings for Prefect using Pydantic settings.\n\nSee https://docs.pydantic.dev/latest/concepts/pydantic_settings"
- },
+ "Settings": {},
"SimpleFlowRun": {
"properties": {
"id": {
@@ -21517,9 +21471,6 @@
"properties": {
"type": {
"type": "string",
- "enum": [
- "abort_details"
- ],
"const": "abort_details",
"title": "Type",
"description": "The type of state transition detail. Used to ensure pydantic does not coerce into a different type.",
@@ -21546,9 +21497,6 @@
"properties": {
"type": {
"type": "string",
- "enum": [
- "accept_details"
- ],
"const": "accept_details",
"title": "Type",
"description": "The type of state transition detail. Used to ensure pydantic does not coerce into a different type.",
@@ -21798,9 +21746,6 @@
"properties": {
"type": {
"type": "string",
- "enum": [
- "reject_details"
- ],
"const": "reject_details",
"title": "Type",
"description": "The type of state transition detail. Used to ensure pydantic does not coerce into a different type.",
@@ -21843,9 +21788,6 @@
"properties": {
"type": {
"type": "string",
- "enum": [
- "wait_details"
- ],
"const": "wait_details",
"title": "Type",
"description": "The type of state transition detail. Used to ensure pydantic does not coerce into a different type.",
@@ -21880,9 +21822,6 @@
"properties": {
"type": {
"type": "string",
- "enum": [
- "suspend-flow-run"
- ],
"const": "suspend-flow-run",
"title": "Type",
"default": "suspend-flow-run"
@@ -22167,8 +22106,7 @@
"description": "An ORM representation of task run data."
},
"TaskRunCount": {
- "type": "object",
- "title": "TaskRunCount"
+ "type": "object"
},
"TaskRunCreate": {
"properties": {
@@ -22424,7 +22362,7 @@
"type": "null"
}
],
- "title": "Before ",
+ "title": "Before",
"description": "Only include task runs expected to start at or before this time"
},
"after_": {
@@ -22437,7 +22375,7 @@
"type": "null"
}
],
- "title": "After ",
+ "title": "After",
"description": "Only include task runs expected to start at or after this time"
}
},
@@ -22466,7 +22404,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of task run flow run ids to include"
},
"is_null_": {
@@ -22478,7 +22416,7 @@
"type": "null"
}
],
- "title": "Is Null ",
+ "title": "Is Null",
"description": "Filter for task runs with None as their flow run id",
"default": false
}
@@ -22503,7 +22441,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of task run ids to include"
}
},
@@ -22526,7 +22464,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of task run names to include",
"examples": [
[
@@ -22544,7 +22482,7 @@
"type": "null"
}
],
- "title": "Like ",
+ "title": "Like",
"description": "A case-insensitive partial match. For example, passing 'marvin' will match 'marvin', 'sad-Marvin', and 'marvin-robot'.",
"examples": [
"marvin"
@@ -22568,7 +22506,7 @@
"type": "null"
}
],
- "title": "Before ",
+ "title": "Before",
"description": "Only include task runs starting at or before this time"
},
"after_": {
@@ -22581,7 +22519,7 @@
"type": "null"
}
],
- "title": "After ",
+ "title": "After",
"description": "Only include task runs starting at or after this time"
},
"is_null_": {
@@ -22593,7 +22531,7 @@
"type": "null"
}
],
- "title": "Is Null ",
+ "title": "Is Null",
"description": "If true, only return task runs without a start time"
}
},
@@ -22651,7 +22589,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of task run state names to include"
}
},
@@ -22674,7 +22612,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of task run state types to include"
}
},
@@ -22694,7 +22632,7 @@
"type": "null"
}
],
- "title": "Exists ",
+ "title": "Exists",
"description": "If true, only include task runs that are subflow run parents; if false, exclude parent task runs"
}
},
@@ -22722,7 +22660,7 @@
"type": "null"
}
],
- "title": "All ",
+ "title": "All",
"description": "A list of tags. Task runs will be returned only if their tags are a superset of the list",
"examples": [
[
@@ -22740,7 +22678,7 @@
"type": "null"
}
],
- "title": "Is Null ",
+ "title": "Is Null",
"description": "If true, only include task runs without tags"
}
},
@@ -22816,9 +22754,6 @@
"properties": {
"input_type": {
"type": "string",
- "enum": [
- "task_run"
- ],
"const": "task_run",
"title": "Input Type",
"default": "task_run"
@@ -23215,7 +23150,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of variable ids to include"
}
},
@@ -23238,7 +23173,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of variables names to include"
},
"like_": {
@@ -23250,7 +23185,7 @@
"type": "null"
}
],
- "title": "Like ",
+ "title": "Like",
"description": "A string to match variable names against. This can include SQL wildcard characters like `%` and `_`.",
"examples": [
"my_variable_%"
@@ -23281,7 +23216,7 @@
"type": "null"
}
],
- "title": "All ",
+ "title": "All",
"description": "A list of tags. Variables will be returned only if their tags are a superset of the list",
"examples": [
[
@@ -23299,7 +23234,7 @@
"type": "null"
}
],
- "title": "Is Null ",
+ "title": "Is Null",
"description": "If true, only include Variables without tags"
}
},
@@ -23626,7 +23561,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of work pool ids to include"
}
},
@@ -23649,7 +23584,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of work pool names to include"
}
},
@@ -23672,7 +23607,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of work pool types to include"
}
},
@@ -23990,7 +23925,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of work queue ids to include"
}
},
@@ -24013,7 +23948,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of work queue names to include",
"examples": [
[
@@ -24034,7 +23969,7 @@
"type": "null"
}
],
- "title": "Startswith ",
+ "title": "Startswith",
"description": "A list of case-insensitive starts-with matches. For example, passing 'marvin' will match 'marvin', and 'Marvin-robot', but not 'sad-marvin'.",
"examples": [
[
@@ -24404,7 +24339,7 @@
"type": "null"
}
],
- "title": "Before ",
+ "title": "Before",
"description": "Only include processes whose last heartbeat was at or before this time"
},
"after_": {
@@ -24417,7 +24352,7 @@
"type": "null"
}
],
- "title": "After ",
+ "title": "After",
"description": "Only include processes whose last heartbeat was at or after this time"
}
},
@@ -24440,7 +24375,7 @@
"type": "null"
}
],
- "title": "Any ",
+ "title": "Any",
"description": "A list of worker statuses to include"
},
"not_any_": {
@@ -24455,7 +24390,7 @@
"type": "null"
}
],
- "title": "Not Any ",
+ "title": "Not Any",
"description": "A list of worker statuses to exclude"
}
},
diff --git a/docs/3.0/api-ref/rest-api/server/task-run-states/read-task-run-state.mdx b/docs/v3/api-ref/rest-api/server/task-run-states/read-task-run-state.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/task-run-states/read-task-run-state.mdx
rename to docs/v3/api-ref/rest-api/server/task-run-states/read-task-run-state.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/task-run-states/read-task-run-states.mdx b/docs/v3/api-ref/rest-api/server/task-run-states/read-task-run-states.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/task-run-states/read-task-run-states.mdx
rename to docs/v3/api-ref/rest-api/server/task-run-states/read-task-run-states.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/task-runs/count-task-runs.mdx b/docs/v3/api-ref/rest-api/server/task-runs/count-task-runs.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/task-runs/count-task-runs.mdx
rename to docs/v3/api-ref/rest-api/server/task-runs/count-task-runs.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/task-runs/create-task-run.mdx b/docs/v3/api-ref/rest-api/server/task-runs/create-task-run.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/task-runs/create-task-run.mdx
rename to docs/v3/api-ref/rest-api/server/task-runs/create-task-run.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/task-runs/delete-task-run.mdx b/docs/v3/api-ref/rest-api/server/task-runs/delete-task-run.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/task-runs/delete-task-run.mdx
rename to docs/v3/api-ref/rest-api/server/task-runs/delete-task-run.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/task-runs/read-dashboard-task-run-counts.mdx b/docs/v3/api-ref/rest-api/server/task-runs/read-dashboard-task-run-counts.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/task-runs/read-dashboard-task-run-counts.mdx
rename to docs/v3/api-ref/rest-api/server/task-runs/read-dashboard-task-run-counts.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/task-runs/read-task-run-counts-by-state.mdx b/docs/v3/api-ref/rest-api/server/task-runs/read-task-run-counts-by-state.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/task-runs/read-task-run-counts-by-state.mdx
rename to docs/v3/api-ref/rest-api/server/task-runs/read-task-run-counts-by-state.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/task-runs/read-task-run.mdx b/docs/v3/api-ref/rest-api/server/task-runs/read-task-run.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/task-runs/read-task-run.mdx
rename to docs/v3/api-ref/rest-api/server/task-runs/read-task-run.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/task-runs/read-task-runs.mdx b/docs/v3/api-ref/rest-api/server/task-runs/read-task-runs.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/task-runs/read-task-runs.mdx
rename to docs/v3/api-ref/rest-api/server/task-runs/read-task-runs.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/task-runs/set-task-run-state.mdx b/docs/v3/api-ref/rest-api/server/task-runs/set-task-run-state.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/task-runs/set-task-run-state.mdx
rename to docs/v3/api-ref/rest-api/server/task-runs/set-task-run-state.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/task-runs/task-run-history.mdx b/docs/v3/api-ref/rest-api/server/task-runs/task-run-history.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/task-runs/task-run-history.mdx
rename to docs/v3/api-ref/rest-api/server/task-runs/task-run-history.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/task-runs/update-task-run.mdx b/docs/v3/api-ref/rest-api/server/task-runs/update-task-run.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/task-runs/update-task-run.mdx
rename to docs/v3/api-ref/rest-api/server/task-runs/update-task-run.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/task-workers/read-task-workers.mdx b/docs/v3/api-ref/rest-api/server/task-workers/read-task-workers.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/task-workers/read-task-workers.mdx
rename to docs/v3/api-ref/rest-api/server/task-workers/read-task-workers.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/ui/validate-obj.mdx b/docs/v3/api-ref/rest-api/server/ui/validate-obj.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/ui/validate-obj.mdx
rename to docs/v3/api-ref/rest-api/server/ui/validate-obj.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/variables/count-variables.mdx b/docs/v3/api-ref/rest-api/server/variables/count-variables.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/variables/count-variables.mdx
rename to docs/v3/api-ref/rest-api/server/variables/count-variables.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/variables/create-variable.mdx b/docs/v3/api-ref/rest-api/server/variables/create-variable.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/variables/create-variable.mdx
rename to docs/v3/api-ref/rest-api/server/variables/create-variable.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/variables/delete-variable-by-name.mdx b/docs/v3/api-ref/rest-api/server/variables/delete-variable-by-name.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/variables/delete-variable-by-name.mdx
rename to docs/v3/api-ref/rest-api/server/variables/delete-variable-by-name.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/variables/delete-variable.mdx b/docs/v3/api-ref/rest-api/server/variables/delete-variable.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/variables/delete-variable.mdx
rename to docs/v3/api-ref/rest-api/server/variables/delete-variable.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/variables/read-variable-by-name.mdx b/docs/v3/api-ref/rest-api/server/variables/read-variable-by-name.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/variables/read-variable-by-name.mdx
rename to docs/v3/api-ref/rest-api/server/variables/read-variable-by-name.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/variables/read-variable.mdx b/docs/v3/api-ref/rest-api/server/variables/read-variable.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/variables/read-variable.mdx
rename to docs/v3/api-ref/rest-api/server/variables/read-variable.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/variables/read-variables.mdx b/docs/v3/api-ref/rest-api/server/variables/read-variables.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/variables/read-variables.mdx
rename to docs/v3/api-ref/rest-api/server/variables/read-variables.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/variables/update-variable-by-name.mdx b/docs/v3/api-ref/rest-api/server/variables/update-variable-by-name.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/variables/update-variable-by-name.mdx
rename to docs/v3/api-ref/rest-api/server/variables/update-variable-by-name.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/variables/update-variable.mdx b/docs/v3/api-ref/rest-api/server/variables/update-variable.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/variables/update-variable.mdx
rename to docs/v3/api-ref/rest-api/server/variables/update-variable.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/work-pools/count-work-pools.mdx b/docs/v3/api-ref/rest-api/server/work-pools/count-work-pools.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/work-pools/count-work-pools.mdx
rename to docs/v3/api-ref/rest-api/server/work-pools/count-work-pools.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/work-pools/create-work-pool.mdx b/docs/v3/api-ref/rest-api/server/work-pools/create-work-pool.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/work-pools/create-work-pool.mdx
rename to docs/v3/api-ref/rest-api/server/work-pools/create-work-pool.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/work-pools/create-work-queue.mdx b/docs/v3/api-ref/rest-api/server/work-pools/create-work-queue.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/work-pools/create-work-queue.mdx
rename to docs/v3/api-ref/rest-api/server/work-pools/create-work-queue.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/work-pools/delete-work-pool.mdx b/docs/v3/api-ref/rest-api/server/work-pools/delete-work-pool.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/work-pools/delete-work-pool.mdx
rename to docs/v3/api-ref/rest-api/server/work-pools/delete-work-pool.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/work-pools/delete-work-queue.mdx b/docs/v3/api-ref/rest-api/server/work-pools/delete-work-queue.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/work-pools/delete-work-queue.mdx
rename to docs/v3/api-ref/rest-api/server/work-pools/delete-work-queue.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/work-pools/delete-worker.mdx b/docs/v3/api-ref/rest-api/server/work-pools/delete-worker.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/work-pools/delete-worker.mdx
rename to docs/v3/api-ref/rest-api/server/work-pools/delete-worker.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/work-pools/get-scheduled-flow-runs.mdx b/docs/v3/api-ref/rest-api/server/work-pools/get-scheduled-flow-runs.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/work-pools/get-scheduled-flow-runs.mdx
rename to docs/v3/api-ref/rest-api/server/work-pools/get-scheduled-flow-runs.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/work-pools/read-work-pool.mdx b/docs/v3/api-ref/rest-api/server/work-pools/read-work-pool.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/work-pools/read-work-pool.mdx
rename to docs/v3/api-ref/rest-api/server/work-pools/read-work-pool.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/work-pools/read-work-pools.mdx b/docs/v3/api-ref/rest-api/server/work-pools/read-work-pools.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/work-pools/read-work-pools.mdx
rename to docs/v3/api-ref/rest-api/server/work-pools/read-work-pools.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/work-pools/read-work-queue.mdx b/docs/v3/api-ref/rest-api/server/work-pools/read-work-queue.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/work-pools/read-work-queue.mdx
rename to docs/v3/api-ref/rest-api/server/work-pools/read-work-queue.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/work-pools/read-work-queues.mdx b/docs/v3/api-ref/rest-api/server/work-pools/read-work-queues.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/work-pools/read-work-queues.mdx
rename to docs/v3/api-ref/rest-api/server/work-pools/read-work-queues.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/work-pools/read-workers.mdx b/docs/v3/api-ref/rest-api/server/work-pools/read-workers.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/work-pools/read-workers.mdx
rename to docs/v3/api-ref/rest-api/server/work-pools/read-workers.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/work-pools/update-work-pool.mdx b/docs/v3/api-ref/rest-api/server/work-pools/update-work-pool.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/work-pools/update-work-pool.mdx
rename to docs/v3/api-ref/rest-api/server/work-pools/update-work-pool.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/work-pools/update-work-queue.mdx b/docs/v3/api-ref/rest-api/server/work-pools/update-work-queue.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/work-pools/update-work-queue.mdx
rename to docs/v3/api-ref/rest-api/server/work-pools/update-work-queue.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/work-pools/worker-heartbeat.mdx b/docs/v3/api-ref/rest-api/server/work-pools/worker-heartbeat.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/work-pools/worker-heartbeat.mdx
rename to docs/v3/api-ref/rest-api/server/work-pools/worker-heartbeat.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/work-queues/create-work-queue.mdx b/docs/v3/api-ref/rest-api/server/work-queues/create-work-queue.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/work-queues/create-work-queue.mdx
rename to docs/v3/api-ref/rest-api/server/work-queues/create-work-queue.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/work-queues/delete-work-queue.mdx b/docs/v3/api-ref/rest-api/server/work-queues/delete-work-queue.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/work-queues/delete-work-queue.mdx
rename to docs/v3/api-ref/rest-api/server/work-queues/delete-work-queue.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/work-queues/read-work-queue-by-name.mdx b/docs/v3/api-ref/rest-api/server/work-queues/read-work-queue-by-name.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/work-queues/read-work-queue-by-name.mdx
rename to docs/v3/api-ref/rest-api/server/work-queues/read-work-queue-by-name.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/work-queues/read-work-queue-runs.mdx b/docs/v3/api-ref/rest-api/server/work-queues/read-work-queue-runs.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/work-queues/read-work-queue-runs.mdx
rename to docs/v3/api-ref/rest-api/server/work-queues/read-work-queue-runs.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/work-queues/read-work-queue-status.mdx b/docs/v3/api-ref/rest-api/server/work-queues/read-work-queue-status.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/work-queues/read-work-queue-status.mdx
rename to docs/v3/api-ref/rest-api/server/work-queues/read-work-queue-status.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/work-queues/read-work-queue.mdx b/docs/v3/api-ref/rest-api/server/work-queues/read-work-queue.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/work-queues/read-work-queue.mdx
rename to docs/v3/api-ref/rest-api/server/work-queues/read-work-queue.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/work-queues/read-work-queues.mdx b/docs/v3/api-ref/rest-api/server/work-queues/read-work-queues.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/work-queues/read-work-queues.mdx
rename to docs/v3/api-ref/rest-api/server/work-queues/read-work-queues.mdx
diff --git a/docs/3.0/api-ref/rest-api/server/work-queues/update-work-queue.mdx b/docs/v3/api-ref/rest-api/server/work-queues/update-work-queue.mdx
similarity index 100%
rename from docs/3.0/api-ref/rest-api/server/work-queues/update-work-queue.mdx
rename to docs/v3/api-ref/rest-api/server/work-queues/update-work-queue.mdx
diff --git a/docs/3.0/automate/add-schedules.mdx b/docs/v3/automate/add-schedules.mdx
similarity index 97%
rename from docs/3.0/automate/add-schedules.mdx
rename to docs/v3/automate/add-schedules.mdx
index 6494de4a507b..831db0978831 100644
--- a/docs/3.0/automate/add-schedules.mdx
+++ b/docs/v3/automate/add-schedules.mdx
@@ -4,7 +4,7 @@ description: Prefect can schedule when to automatically create new flow runs.
---
Prefect allows you to specify schedules on which your flows run.
-You can add one or more schedules to any [served or deployed](/3.0/deploy/run-flows-in-local-processes) flow.
+You can add one or more schedules to any [served or deployed](/v3/deploy/run-flows-in-local-processes) flow.
Schedules tell Prefect when and how to create new flow runs.
You can add a schedule to a deployed flow in the Prefect UI, through the `prefect deployment schedule` CLI command, or in the `prefect.yaml` configuration file.
@@ -14,9 +14,9 @@ There are several ways to create a schedule for a deployment:
- Through the Prefect UI
- With the `cron`, `interval`, or `rrule` parameters if building your deployment with the
-[`serve` method](/3.0/develop/write-flows/#serving-a-flow) of the `Flow` object or
-[the `serve` utility](/3.0/develop/write-flows/#serving-multiple-flows-at-once) for managing multiple flows simultaneously
-- If using [worker-based deployments](/3.0/deploy/infrastructure-concepts/workers/)
+[`serve` method](/v3/develop/write-flows/#serving-a-flow) of the `Flow` object or
+[the `serve` utility](/v3/develop/write-flows/#serving-multiple-flows-at-once) for managing multiple flows simultaneously
+- If using [worker-based deployments](/v3/deploy/infrastructure-concepts/workers/)
- When you define a deployment with `flow.serve` or `flow.deploy`
- Through the interactive `prefect deploy` command
- With the `deployments` -> `schedules` section of the `prefect.yaml` file
@@ -294,7 +294,7 @@ PREFECT_API_SERVICES_SCHEDULER_MIN_SCHEDULED_TIME='1:00:00'
PREFECT_API_SERVICES_SCHEDULER_MAX_SCHEDULED_TIME='100 days, 0:00:00'
```
-See the [Settings docs](/3.0/manage/settings-and-profiles/) for more information on altering your settings.
+See the [Settings docs](/v3/develop/settings-and-profiles/) for more information on altering your settings.
With these default settings, a deployment with an hourly schedule will have runs created for the next four days (or 100 hours).
If it has a weekly schedule, the default settings will maintain the next 14 runs (up to 100 days in the future).
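The renamed page above documents attaching schedules via the `cron`, `interval`, or `rrule` parameters of `serve`. A minimal runnable sketch of that option (the flow body and deployment name are illustrative):

```python
from prefect import flow

@flow
def hello():
    print("hello")

if __name__ == "__main__":
    # Any one of `cron`, `interval`, or `rrule` may be passed to `serve`;
    # here a cron schedule creates a run at the top of every hour.
    hello.serve(name="hourly-hello", cron="0 * * * *")
```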
diff --git a/docs/3.0/automate/events/automations-triggers.mdx b/docs/v3/automate/events/automations-triggers.mdx
similarity index 95%
rename from docs/3.0/automate/events/automations-triggers.mdx
rename to docs/v3/automate/events/automations-triggers.mdx
index 215a7b0844b2..5177a7adf7f5 100644
--- a/docs/3.0/automate/events/automations-triggers.mdx
+++ b/docs/v3/automate/events/automations-triggers.mdx
@@ -6,8 +6,8 @@ description: Automations provide a flexible and powerful framework for automatic
Automations enable you to configure [actions](#actions) that execute automatically based on [trigger](#triggers) conditions.
Potential triggers include the occurrence of events from changes in a flow run's state—or the absence of such events.
-You can define your own custom trigger to fire based on a custom [event](/3.0/automate/events/custom-triggers/) defined in Python code.
-With Prefect Cloud you can even create [webhooks](/3.0/automate/events/webhook-triggers/) that can receive data for use in actions.
+You can define your own custom trigger to fire based on a custom [event](/v3/automate/events/custom-triggers/) defined in Python code.
+With Prefect Cloud you can even create [webhooks](/v3/automate/events/webhook-triggers/) that can receive data for use in actions.
Actions include starting flow runs, pausing schedules, and sending custom notifications.
@@ -23,7 +23,7 @@ On the **Automations** page, select the **+** icon to create a new automation. Y
The **Automations** page provides an overview of all configured automations for your workspace.
-![Viewing automations for a workspace in Prefect Cloud.](/3.0/img/ui/automations.png)
+![Viewing automations for a workspace in Prefect Cloud.](/v3/img/ui/automations.png)
Select the toggle next to an automation to pause execution of the automation.
@@ -54,7 +54,7 @@ trigger and action policies based on arbitrary [events](https://app.prefect.clou
Importantly, you can configure the triggers not only in reaction to events, but also proactively: in the absence of
an expected event.
-![Configuring a trigger for an automation in Prefect Cloud.](/3.0/img/ui/automations-trigger.png)
+![Configuring a trigger for an automation in Prefect Cloud.](/v3/img/ui/automations-trigger.png)
For example, in the case of flow run state change triggers, you might expect production flows to finish in no longer
than thirty minutes. But transient infrastructure or network issues could cause your flow to get “stuck” in a running state.
@@ -80,7 +80,7 @@ Actions specify what your automation does when its trigger criteria are met. Cur
- Declare an incident (available on Pro and Enterprise plans)
- Change the state of a flow run
-![Configuring an action for an automation in Prefect Cloud.](/3.0/img/ui/automations-action.png)
+![Configuring an action for an automation in Prefect Cloud.](/v3/img/ui/automations-action.png)
### Create automations in Python code
@@ -143,7 +143,7 @@ creating automations that are linked to specific deployments to run them based o
Trigger definitions for deployments are supported in `prefect.yaml`, `.serve`, and `.deploy`. At deployment time,
specified trigger definitions create linked automations triggered by events matching your chosen
-[grammar](/3.0/automate/events/events/#event-grammar). Each trigger definition may include a [jinja template](https://en.wikipedia.org/wiki/Jinja_(template_engine))
+[grammar](/v3/automate/events/events/#event-grammar). Each trigger definition may include a [jinja template](https://en.wikipedia.org/wiki/Jinja_(template_engine))
to render the triggering `event` as the `parameters` of your deployment's flow run.
### Define triggers in `prefect.yaml`
@@ -331,7 +331,7 @@ to send a message, including:
- Microsoft Teams message to a channel
- Email to an email address
-![Configuring notifications for an automation in Prefect Cloud.](/3.0/img/ui/automations-notifications.png)
+![Configuring notifications for an automation in Prefect Cloud.](/v3/img/ui/automations-notifications.png)
## Templating with Jinja
@@ -365,7 +365,7 @@ Flow run {{ flow_run.name }} entered state {{ flow_run.state.name }}.
The resulting Slack webhook notification looks something like this:
-![Configuring notifications for an automation in Prefect Cloud.](/3.0/img/ui/templated-notification.png)
+![Configuring notifications for an automation in Prefect Cloud.](/v3/img/ui/templated-notification.png)
You could include `flow` and `deployment` properties:
@@ -471,24 +471,24 @@ workflow logic.
1. Prior to creating the automation, confirm the notification location. Create a notification block to help define where
the notification is sent.
-![List of available blocks](/3.0/img/guides/block-list.png)
+![List of available blocks](/v3/img/guides/block-list.png)
2. Navigate to the blocks page in the UI and create an email notification block.
-![Creating a notification block in the Cloud UI](/3.0/img/guides/notification-block.png)
+![Creating a notification block in the Cloud UI](/v3/img/guides/notification-block.png)
3. Go to the automations page to create your first automation.
-![Automations page](/3.0/img/guides/automation-list.png)
+![Automations page](/v3/img/guides/automation-list.png)
4. Next, find the trigger type. In this case, use a flow completion.
-![Trigger type](/3.0/img/guides/automation-triggers.png)
+![Trigger type](/v3/img/guides/automation-triggers.png)
5. Create the action that runs when the trigger fires. In this case, send a notification to announce the
completion.
-![Notification block in automation](/3.0/img/guides/notify-auto-block.png)
+![Notification block in automation](/v3/img/guides/notify-auto-block.png)
6. Now the automation is ready to be triggered by a flow run completion. Run the file locally and confirm that the
notification is sent to your inbox after the run completes. It may take a few minutes for the notification to arrive.
-![Final notification](/3.0/img/guides/final-automation.png)
+![Final notification](/v3/img/guides/final-automation.png)
**No deployment created**
@@ -728,7 +728,7 @@ Create this webhook in the UI to create these dynamic events.
```
From this input, you can create an exposed webhook endpoint.
-![webhook-simple](/3.0/img/guides/webhook-simple.png)
+![webhook-simple](/v3/img/guides/webhook-simple.png)
Each webhook corresponds to a custom event that you can react to downstream with a separate deployment or
automation.
@@ -739,14 +739,14 @@ curl -X POST https://api.prefect.cloud/hooks/34iV2SFke3mVa6y5Y-YUoA -d "model_id
```
From here, you can make a webhook that pulls in parameters from the curl command and kicks off a deployment
that uses them:
-![Webhook created](/3.0/img/guides/webhook-created.png)
+![Webhook created](/v3/img/guides/webhook-created.png)
Go into the event feed to automate straight from this event:
-![Webhook automate](/3.0/img/guides/webhook-automate.png)
+![Webhook automate](/v3/img/guides/webhook-automate.png)
This allows you to create automations that respond to these webhook events. With a few clicks in the UI, you can
associate an external process with the Prefect events API that can trigger downstream deployments.
-![Automation custom](/3.0/img/guides/automation-custom.png)
+![Automation custom](/v3/img/guides/automation-custom.png)
## Examples
@@ -909,6 +909,6 @@ In the above example:
## See also
-- To learn more about Prefect events, which can trigger automations, see the [events docs](/3.0/automate/events/events/).
-- See the [webhooks guide](/3.0/automate/events/webhook-triggers/)
+- To learn more about Prefect events, which can trigger automations, see the [events docs](/v3/automate/events/events/).
+- See the [webhooks guide](/v3/automate/events/webhook-triggers/)
to learn how to create webhooks and receive external events.
\ No newline at end of file
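The renamed page above also covers trigger definitions supplied through `.serve` and `.deploy`. A sketch of that pattern based on the page's own description; the event name, deployment name, and parameter are illustrative assumptions:

```python
from prefect import flow
from prefect.events import DeploymentEventTrigger

@flow(log_prints=True)
def my_flow(param_1: str):
    print(param_1)

if __name__ == "__main__":
    my_flow.serve(
        name="event-driven",  # illustrative deployment name
        triggers=[
            DeploymentEventTrigger(
                enabled=True,
                match={"prefect.resource.id": "external.resource.pinged"},
                expect=["external.resource.pinged"],  # assumed event name
                # Jinja-render the triggering event into the flow's parameters.
                parameters={"param_1": "{{ event }}"},
            )
        ],
    )
```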
diff --git a/docs/3.0/automate/events/custom-triggers.mdx b/docs/v3/automate/events/custom-triggers.mdx
similarity index 99%
rename from docs/3.0/automate/events/custom-triggers.mdx
rename to docs/v3/automate/events/custom-triggers.mdx
index 1f2651b80d34..e09ef611ac41 100644
--- a/docs/3.0/automate/events/custom-triggers.mdx
+++ b/docs/v3/automate/events/custom-triggers.mdx
@@ -14,7 +14,7 @@ that combines several underlying triggers.
Event triggers are the most common type of trigger. They are intended to react to the presence or absence of an event.
Event triggers are indicated with `{"type": "event"}`.
-![Viewing a custom trigger for automations for a workspace in Prefect Cloud.](/3.0/img/ui/automations-custom.png)
+![Viewing a custom trigger for automations for a workspace in Prefect Cloud.](/v3/img/ui/automations-custom.png)
This is the schema that defines an event trigger:
diff --git a/docs/3.0/automate/events/events.mdx b/docs/v3/automate/events/events.mdx
similarity index 84%
rename from docs/3.0/automate/events/events.mdx
rename to docs/v3/automate/events/events.mdx
index 812032755d6c..4042ff91f5c8 100644
--- a/docs/3.0/automate/events/events.mdx
+++ b/docs/v3/automate/events/events.mdx
@@ -6,16 +6,16 @@ description: An event is a notification of a change, creating a history of activ
Events can represent API calls, state transitions, or changes in your execution environment or infrastructure.
Events power several features, including flow run logs and automations.
-In Prefect Cloud, events power [audit logs](/3.0/manage/cloud/manage-users/audit-logs/).
+In Prefect Cloud, events power [audit logs](/v3/manage/cloud/manage-users/audit-logs/).
Events enable observability into your data stack through the [event feed in the UI](#events-in-the-cloud-ui) and the
-configuration of Prefect's reactivity through [automations](/3.0/automate/events/automations-triggers/).
+configuration of Prefect's reactivity through [automations](/v3/automate/events/automations-triggers/).
## Event specification
Events adhere to a structured [specification](https://app.prefect.cloud/api/docs#tag/Events).
-![Prefect UI](/3.0/img/ui/event-spec.png)
+![Prefect UI](/v3/img/ui/event-spec.png)
| Name | Type | Required? | Description |
| -------- | ------ | --------- | -------------------------------------------------------------------- |
@@ -69,10 +69,10 @@ To get data into an event for use in an automation action, specify a dictionary
### Emit events through webhooks
-Prefect Cloud offers [programmable webhooks](/3.0/automate/events/webhook-triggers/) to receive HTTP requests
+Prefect Cloud offers [programmable webhooks](/v3/automate/events/webhook-triggers/) to receive HTTP requests
from other systems and translate them into events within your workspace.
-Webhooks can emit [pre-defined static events](/3.0/automate/events/webhook-triggers/#static-webhook-events),
-dynamic events that [use portions of the incoming HTTP request](/3.0/automate/events/webhook-triggers/#dynamic-webhook-events), or events derived from [CloudEvents](/3.0/automate/events/webhook-triggers/#accepting-cloudevents).
+Webhooks can emit [pre-defined static events](/v3/automate/events/webhook-triggers/#static-webhook-events),
+dynamic events that [use portions of the incoming HTTP request](/v3/automate/events/webhook-triggers/#dynamic-webhook-events), or events derived from [CloudEvents](/v3/automate/events/webhook-triggers/#accepting-cloudevents).
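Events don't have to originate from webhooks, either; they can be emitted directly from Python with Prefect's `emit_event` helper (a sketch with illustrative event and resource names):

```python
from prefect.events import emit_event

# emit a custom event; the resource ID identifies what the event is about
emit_event(
    event="model.training.completed",
    resource={"prefect.resource.id": "model.my-classifier"},
    payload={"accuracy": 0.95},
)
```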
## Resources
@@ -119,7 +119,7 @@ Here is an example of a related resource:
The event feed page in the Prefect UI provides an interactive dashboard to analyze and take action on events that occur in your workspace.
-![Event feed](/3.0/img/ui/event-feed.png)
+![Event feed](/v3/img/ui/event-feed.png)
The event feed is the primary place to view, search, and filter events to understand activity across your stack.
Each entry displays data on the resource, related resource, and event that took place.
@@ -129,10 +129,10 @@ resource, related resources, and its payload.
## Respond to events
-From an event page, you can configure an [automation](/3.0/automate/events/automations-triggers/) to trigger on
+From an event page, you can configure an [automation](/v3/automate/events/automations-triggers/) to trigger on
the observation of matching events—or a lack of matching events—by clicking the automate button in the overflow menu:
-![Automation from event](/3.0/img/ui/automation-from-event.png)
+![Automation from event](/v3/img/ui/automation-from-event.png)
The default trigger configuration fires every time it sees an event with a matching resource identifier.
-Advanced configuration is possible through [custom triggers](/3.0/automate/events/custom-triggers/).
+Advanced configuration is possible through [custom triggers](/v3/automate/events/custom-triggers/).
diff --git a/docs/3.0/automate/events/webhook-triggers.mdx b/docs/v3/automate/events/webhook-triggers.mdx
similarity index 96%
rename from docs/3.0/automate/events/webhook-triggers.mdx
rename to docs/v3/automate/events/webhook-triggers.mdx
index d6becf7f8784..c1f8392505d7 100644
--- a/docs/3.0/automate/events/webhook-triggers.mdx
+++ b/docs/v3/automate/events/webhook-triggers.mdx
@@ -6,7 +6,7 @@ description: Observe and respond to events from other systems.
Prefect Cloud webhooks can receive, observe, and respond to events
from other systems.
Each webhook exposes a unique URL endpoint to receive events from other systems and transform them into Prefect
-[events](/3.0/automate/events/) for use in [automations](/3.0/automate/events/automations-triggers/).
+[events](/v3/automate/events/) for use in [automations](/v3/automate/events/automations-triggers/).
Webhooks are defined by two essential components: a unique URL and a template that translates incoming web requests to a
Prefect event.
@@ -20,14 +20,14 @@ Set up your webhooks through the Prefect Cloud API, Prefect Cloud UI, or Prefect
Webhooks are managed through the [Webhooks API
endpoints](https://app.prefect.cloud/api/docs#tag/Webhooks). This is a Prefect
Cloud-only feature. Authenticate API calls using the standard
-[authentication methods](/3.0/manage/cloud/connect-to-cloud#manually-configure-prefect-api-settings) you use with Prefect
+[authentication methods](/v3/manage/cloud/connect-to-cloud#manually-configure-prefect-api-settings) you use with Prefect
Cloud.
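For example, a call against the Webhooks API from Python might look roughly like this (a sketch; the exact endpoint path is an assumption, so confirm it against the API reference linked above):

```python
import os
import httpx

# standard Prefect Cloud auth: a bearer token built from your API key;
# PREFECT_API_URL should point at your account/workspace API URL.
# NOTE: the "/webhooks/filter" path is an assumption, not a confirmed route.
response = httpx.post(
    f"{os.environ['PREFECT_API_URL']}/webhooks/filter",
    headers={"Authorization": f"Bearer {os.environ['PREFECT_API_KEY']}"},
    json={},
)
response.raise_for_status()
print(response.json())
```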
### Through Prefect Cloud
You can create and manage webhooks from the Prefect Cloud UI.
-![Managing a webhook in the Prefect Cloud UI.](/3.0/img/ui/webhook.png)
+![Managing a webhook in the Prefect Cloud UI.](/v3/img/ui/webhook.png)
### Through the Prefect CLI
@@ -139,7 +139,7 @@ Prefect Cloud workspace. The template you define for each webhook determines how
individual components of the incoming HTTP request become the event name and resource
labels of the resulting Prefect event.
-As with the [templates available in Prefect Cloud Automation](/3.0/automate/events/automations-triggers) for
+As with the [templates available in Prefect Cloud Automation](/v3/automate/events/automations-triggers) for
defining notifications and other parameters, you write templates in
[Jinja2](https://jinja.palletsprojects.com/en/3.1.x/templates/). All of the built-in
Jinja2 blocks and filters are available, as well as the filters from the
@@ -349,7 +349,7 @@ way tailored to your use case, use a dynamic template to interpret the incoming
The initial configuration of your webhook may require some trial and error as you get
the sender and your receiving webhook speaking a compatible language. While you are in
-this phase, lean upon the [event feed](/3.0/automate/events/#event-feed) in the UI to see events as they happen.
+this phase, lean upon the [event feed](/v3/automate/events/#event-feed) in the UI to see events as they happen.
When Prefect Cloud encounters an error during receipt of a webhook, it produces a
`prefect-cloud.webhook.failed` event in your workspace. This event includes
diff --git a/docs/3.0/automate/incidents.mdx b/docs/v3/automate/incidents.mdx
similarity index 91%
rename from docs/3.0/automate/incidents.mdx
rename to docs/v3/automate/incidents.mdx
index e7a2a08f7f90..19f8ef174480 100644
--- a/docs/3.0/automate/incidents.mdx
+++ b/docs/v3/automate/incidents.mdx
@@ -32,7 +32,7 @@ There are several ways to create an incident:
* **From a flow run, work pool, or block**, from the failed flow run, click the menu button and select "Declare an incident". This method automatically links the resource.
-* **Through an [automation](/3.0/automate/events/automations-triggers/)**, set up incident creation as an automated response to selected triggers.
+* **Through an [automation](/v3/automate/events/automations-triggers/)**, set up incident creation as an automated response to selected triggers.
### Automate an incident
@@ -63,7 +63,7 @@ To get started with incident automations, specify two fields in your trigger:
- `prefect-cloud.incident.resolved`
- `prefect-cloud.incident.updated.severity`
-See [event triggers](/3.0/automate/events/automations-triggers/#custom-triggers) for more information on custom triggers, and check out your event feed to see the event types emitted by your incidents and other resources (that is, the events that you can react to).
+See [event triggers](/v3/automate/events/automations-triggers/#custom-triggers) for more information on custom triggers, and check out your event feed to see the event types emitted by your incidents and other resources (that is, the events that you can react to).
When an incident is declared, any actions you configure, such as pausing work pools or sending notifications, execute immediately.
diff --git a/docs/v3/automate/index.mdx b/docs/v3/automate/index.mdx
new file mode 100644
index 000000000000..628f69f0efbd
--- /dev/null
+++ b/docs/v3/automate/index.mdx
@@ -0,0 +1,14 @@
+---
+title: Automate overview
+sidebarTitle: Overview
+description: Learn how to automate workflows with Prefect.
+---
+
+This **Automate** section explains how to create workflows that run automatically and respond to events.
+
+- [Schedule flow runs](/v3/automate/add-schedules/) explains how to schedule flow runs for specific times or intervals.
+- [Track activity through events](/v3/automate/events/events/) discusses how to observe events that create a record of activity.
+- [Trigger actions on events](/v3/automate/events/automations-triggers/) shows how to use events to trigger actions.
+- [Define custom event triggers](/v3/automate/events/custom-triggers/) discusses advanced trigger options.
+- [Receive events with webhooks](/v3/automate/events/webhook-triggers/) explains how to use webhooks to receive events from external systems with Prefect Cloud.
+- [Manage incidents](/v3/automate/incidents/) shows how Prefect Cloud can help identify, resolve, and document issues in mission-critical workflows.
diff --git a/docs/3.0/deploy/index.mdx b/docs/v3/deploy/index.mdx
similarity index 81%
rename from docs/3.0/deploy/index.mdx
rename to docs/v3/deploy/index.mdx
index 45bb1b291ac7..c4e8caab09a2 100644
--- a/docs/3.0/deploy/index.mdx
+++ b/docs/v3/deploy/index.mdx
@@ -4,16 +4,16 @@ sidebarTitle: Overview
description: Learn how to use deployments to trigger flow runs remotely.
---
-Deployments allow you to run flows on a [schedule](/3.0/automate/add-schedules/) and trigger runs based on [events](/3.0/automate/events/automations-triggers/).
+Deployments allow you to run flows on a [schedule](/v3/automate/add-schedules/) and trigger runs based on [events](/v3/automate/events/automations-triggers/).
-[Deployments](/3.0/deploy/infrastructure-examples/docker/) are server-side representations of flows.
+Deployments are server-side representations of flows.
They store the crucial metadata for remote orchestration including when, where, and how a workflow should run.
In addition to manually triggering and managing flow runs, deploying a flow exposes an API and UI that allow you to:
-- trigger new runs, [cancel active runs](/3.0/develop/write-flows/#cancel-a-flow-run), pause scheduled runs, customize parameters, and more
-- remotely configure [schedules](/3.0/automate/add-schedules) and [automation rules](/3.0/automate/events/automations-triggers)
-- dynamically provision infrastructure with [work pools](/3.0/deploy/infrastructure-concepts/work-pools) - optionally with templated guardrails for other users
+- trigger new runs, [cancel active runs](/v3/develop/write-flows/#cancel-a-flow-run), pause scheduled runs, customize parameters, and more
+- remotely configure [schedules](/v3/automate/add-schedules) and [automation rules](/v3/automate/events/automations-triggers)
+- dynamically provision infrastructure with [work pools](/v3/deploy/infrastructure-concepts/work-pools) - optionally with templated guardrails for other users
## Create a deployment
@@ -74,11 +74,11 @@ if __name__ == "__main__":
)
```
-To learn more about the `deploy` method, see [Deploy flows with Python](/3.0/deploy/infrastructure-concepts/deploy-via-python).
+To learn more about the `deploy` method, see [Deploy flows with Python](/v3/deploy/infrastructure-concepts/deploy-via-python).
### Create a deployment with a YAML file
-If you'd rather take a declarative approach to defining a deployment through a YAML file, use a [`prefect.yaml` file](/3.0/deploy/infrastructure-concepts/prefect-yaml).
+If you'd rather take a declarative approach to defining a deployment through a YAML file, use a [`prefect.yaml` file](/v3/deploy/infrastructure-concepts/prefect-yaml).
Prefect provides an interactive CLI that walks you through creating a `prefect.yaml` file:
@@ -88,16 +88,16 @@ prefect deploy
The result is a `prefect.yaml` file for deployment creation.
The file contains `build`, `push`, and `pull` steps for building a Docker image, pushing code to a Docker registry, and pulling code at runtime.
-Learn more about creating deployments with a YAML file in [Define deployments with YAML](/3.0/deploy/infrastructure-concepts/prefect-yaml).
+Learn more about creating deployments with a YAML file in [Define deployments with YAML](/v3/deploy/infrastructure-concepts/prefect-yaml).
-Prefect also provides [CI/CD options](/3.0/deploy/infrastructure-concepts/deploy-ci-cd) for automatically creating YAML-based deployments.
+Prefect also provides [CI/CD options](/v3/deploy/infrastructure-concepts/deploy-ci-cd) for automatically creating YAML-based deployments.
### Work pools
-[Work pools](/3.0/deploy/infrastructure-concepts/work-pools/) allow you to switch between different types of infrastructure and to create a template for deployments.
+[Work pools](/v3/deploy/infrastructure-concepts/work-pools/) allow you to switch between different types of infrastructure and to create a template for deployments.
Data platform teams find work pools especially useful for managing infrastructure configuration across teams of data professionals.
-Common work pool types include [Docker](/3.0/deploy/infrastructure-examples/docker), [Kubernetes](/3.0/deploy/infrastructure-examples/kubernetes), and serverless options such as [AWS ECS](/integrations/prefect-aws/ecs_guide#ecs-worker-guide), [Azure ACI](/integrations/prefect-azure/aci_worker), [GCP Vertex AI](/integrations/prefect-gcp/index#run-flows-on-google-cloud-run-or-vertex-ai), or [GCP Google Cloud Run](/integrations/prefect-gcp/gcp-worker-guide).
+Common work pool types include [Docker](/v3/deploy/infrastructure-examples/docker), [Kubernetes](/v3/deploy/infrastructure-examples/kubernetes), and serverless options such as [AWS ECS](/integrations/prefect-aws/ecs_guide#ecs-worker-guide), [Azure ACI](/integrations/prefect-azure/aci_worker), [GCP Vertex AI](/integrations/prefect-gcp/index#run-flows-on-google-cloud-run-or-vertex-ai), or [GCP Google Cloud Run](/integrations/prefect-gcp/gcp-worker-guide).
### Work pool-based deployment requirements
@@ -115,26 +115,26 @@ Your flow code location can be specified in a few ways:
2. the cloud provider storage location (for example, AWS S3)
3. the local path (an option for Process work pools)
-See the [Retrieve code from storage docs](/3.0/deploy/infrastructure-concepts/store-flow-code) for more information about flow code storage.
+See the [Retrieve code from storage docs](/v3/deploy/infrastructure-concepts/store-flow-code) for more information about flow code storage.
## Run a deployment
-You can set a deployment to run manually, on a [schedule](/3.0/automate/add-schedules#schedule-flow-runs), or [in response to an event](/3.0/automate/events/automations-triggers).
+You can set a deployment to run manually, on a [schedule](/v3/automate/add-schedules#schedule-flow-runs), or [in response to an event](/v3/automate/events/automations-triggers).
The deployment inherits the infrastructure configuration from the work pool, and can be overridden at deployment creation time or at runtime.
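For example, you can trigger a run of an existing deployment from Python (a minimal sketch, assuming a deployment named `my-flow/my-deployment` already exists):

```python
from prefect.deployments import run_deployment

# schedule a run through the API; by default this waits for the run to finish
flow_run = run_deployment(name="my-flow/my-deployment")
print(flow_run.state)
```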
### Work pools that require a worker
-To run a deployment with a hybrid work pool type, such as Docker or Kubernetes, you must start a [worker](/3.0/deploy/infrastructure-concepts/workers/).
+To run a deployment with a hybrid work pool type, such as Docker or Kubernetes, you must start a [worker](/v3/deploy/infrastructure-concepts/workers/).
-A [Prefect worker](/3.0/deploy/infrastructure-concepts/workers) is a client-side process that checks for scheduled flow runs in the work pool that it matches.
+A [Prefect worker](/v3/deploy/infrastructure-concepts/workers) is a client-side process that checks for scheduled flow runs in the work pool that it matches.
When a scheduled run is found, the worker kicks off a flow run on the specified infrastructure and monitors the flow run until completion.
### Work pools that don't require a worker
-Prefect Cloud offers [push work pools](/3.0/deploy/infrastructure-examples/serverless#automatically-create-a-new-push-work-pool-and-provision-infrastructure) that run flows on Cloud provider serverless infrastructure without a worker and that can be set up quickly.
+Prefect Cloud offers [push work pools](/v3/deploy/infrastructure-examples/serverless#automatically-create-a-new-push-work-pool-and-provision-infrastructure) that can be set up quickly and run flows on cloud provider serverless infrastructure without a worker.
-Prefect Cloud also provides the option to run work flows on Prefect's infrastructure through a [Prefect Managed work pool](/3.0/deploy/infrastructure-examples/managed).
+Prefect Cloud also provides the option to run workflows on Prefect's infrastructure through a [Prefect Managed work pool](/v3/deploy/infrastructure-examples/managed).
These work pool types do not require a worker to run flows.
However, they do require sharing a bit more information with Prefect, which can be a challenge depending upon the security posture of your organization.
@@ -146,8 +146,8 @@ The best choice depends on your use case.
### Static infrastructure
-When you have several flows running regularly, [the `serve` method](/3.0/develop/write-flows/#serving-a-flow)
-of the `Flow` object or [the `serve` utility](/3.0/develop/write-flows/#serving-multiple-flows-at-once)
+When you have several flows running regularly, [the `serve` method](/v3/develop/write-flows/#serving-a-flow)
+of the `Flow` object or [the `serve` utility](/v3/develop/write-flows/#serving-multiple-flows-at-once)
is a great option for managing multiple flows simultaneously.
Once you have authored your flow and decided on its deployment settings, run this long-running
@@ -175,12 +175,12 @@ Consider running flows on dynamically provisioned infrastructure with work pools
- An internal organizational structure in which deployment authors or runners are not members of
the team that manages the infrastructure.
-[Work pools](/3.0/deploy/infrastructure-concepts/work-pools/) allow Prefect to exercise greater control
+[Work pools](/v3/deploy/infrastructure-concepts/work-pools/) allow Prefect to exercise greater control
of the infrastructure on which flows run.
-Options for [serverless work pools](/3.0/deploy/infrastructure-examples/serverless/) allow you to
+Options for [serverless work pools](/v3/deploy/infrastructure-examples/serverless/) allow you to
scale to zero when workflows aren't running.
Prefect even provides you with the ability to
-[provision cloud infrastructure via a single CLI command](/3.0/deploy/infrastructure-examples/serverless/#automatically-create-a-new-push-work-pool-and-provision-infrastructure),
+[provision cloud infrastructure via a single CLI command](/v3/deploy/infrastructure-examples/serverless/#automatically-create-a-new-push-work-pool-and-provision-infrastructure),
if you use a Prefect Cloud push work pool option.
With work pools:
@@ -296,7 +296,7 @@ This separation means that your flow code stays within your storage and executio
infrastructure.
This is key to the Prefect hybrid model: there's a boundary between your proprietary assets,
-such as your flow code, and the Prefect backend (including [Prefect Cloud](/3.0/manage/cloud/)).
+such as your flow code, and the Prefect backend (including [Prefect Cloud](/v3/manage/cloud/)).
### Workflow scheduling and parametrization
@@ -307,9 +307,9 @@ scheduled with different values through parameters.
These are the fields to capture the required metadata for those actions:
-- **`schedules`**: a list of [schedule objects](/3.0/automate/add-schedules/).
+- **`schedules`**: a list of [schedule objects](/v3/automate/add-schedules/).
Most of the convenient interfaces for creating deployments allow users to avoid creating this object themselves.
-For example, when [updating a deployment schedule in the UI](/3.0/automate/add-schedules/#creating-schedules-through-the-ui)
+For example, when [updating a deployment schedule in the UI](/v3/automate/add-schedules/#creating-schedules-through-the-ui),
basic information such as a cron string or interval is all that's required.
- **`parameter_openapi_schema`**: an [OpenAPI compatible schema](https://swagger.io/specification/) that defines
the types and defaults for the flow's parameters.
@@ -400,7 +400,7 @@ anytime the flow or task runs, allowing you to audit changes.
### Worker-specific fields
-[Work pools](/3.0/deploy/infrastructure-concepts/work-pools/) and [workers](/3.0/deploy/infrastructure-concepts/workers/) are an advanced deployment pattern that
+[Work pools](/v3/deploy/infrastructure-concepts/work-pools/) and [workers](/v3/deploy/infrastructure-concepts/workers/) are an advanced deployment pattern that
allow you to dynamically provision infrastructure for each flow run.
The work pool job template interface allows users to create and govern opinionated interfaces
to their workflow infrastructure.
diff --git a/docs/3.0/deploy/infrastructure-concepts/customize.mdx b/docs/v3/deploy/infrastructure-concepts/customize.mdx
similarity index 92%
rename from docs/3.0/deploy/infrastructure-concepts/customize.mdx
rename to docs/v3/deploy/infrastructure-concepts/customize.mdx
index 51745426114a..96ba6847a1e1 100644
--- a/docs/3.0/deploy/infrastructure-concepts/customize.mdx
+++ b/docs/v3/deploy/infrastructure-concepts/customize.mdx
@@ -3,7 +3,7 @@ title: Override job configuration for specific deployments
description: Override job variables on a deployment to set environment variables, specify a Docker image, allocate resources, and more.
---
-There are two ways to deploy flows to work pools: with a [`prefect.yaml` file](/3.0/deploy/infrastructure-concepts/prefect-yaml) or using the [Python `deploy` method](/3.0/deploy/infrastructure-concepts/deploy-via-python).
+There are two ways to deploy flows to work pools: with a [`prefect.yaml` file](/v3/deploy/infrastructure-concepts/prefect-yaml) or using the [Python `deploy` method](/v3/deploy/infrastructure-concepts/deploy-via-python).
In both cases, you can add or override job variables to the work pool's defaults for a given deployment.
You can override both a work pool and a deployment when a flow run is triggered.
@@ -36,10 +36,10 @@ Here's an example repo structure:
```
» tree
.
-├── README.md
-├── requirements.txt
-├── demo_project
-│ ├── daily_flow.py
+|-- README.md
+|-- requirements.txt
+|-- demo_project
+|   |-- daily_flow.py
```
With a `daily_flow.py` file like:
@@ -114,7 +114,7 @@ Then run `prefect deploy -n demo-deployment` to deploy the flow with these job v
You should see the job variables in the `Configuration` tab of the deployment in the UI:
-![Job variables in the UI](/3.0/img/guides/job-variables.png)
+![Job variables in the UI](/v3/img/guides/job-variables.png)
#### Use existing environment variables
To use environment variables that are already set in your local environment, you can template
@@ -190,7 +190,7 @@ python demo_project/daily_flow.py
The job variables should be visible in the UI under the `Configuration` tab.
-![Job variables in the UI](/3.0/img/guides/job-variables.png)
+![Job variables in the UI](/v3/img/guides/job-variables.png)
## Override job variables on a flow run
@@ -203,7 +203,7 @@ Any interface that runs deployments can accept job variables.
Custom runs allow you to pass a dictionary of variables to your flow run infrastructure. Using the same
`env` example from above, you could do the following:
-![Job variables through custom run](/3.0/img/ui/deployment-job-variables.png)
+![Job variables through custom run](/v3/img/ui/deployment-job-variables.png)
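The same kind of override can be expressed in Python (a sketch; the deployment name is illustrative, and `job_variables` support on `run_deployment` is assumed here):

```python
from prefect.deployments import run_deployment

# pass job variables for this run only; they override the deployment's defaults
run_deployment(
    name="demo-flow/demo-deployment",
    job_variables={"env": {"EXECUTION_ENVIRONMENT": "staging"}},
)
```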
### Use the CLI
@@ -221,6 +221,6 @@ prefect deployment run \
Additionally, runs kicked off through automation actions can use job variables, including ones rendered from Jinja
templates.
-![Job variables through automation action](/3.0/img/ui/automations-action-job-variable.png)
+![Job variables through automation action](/v3/img/ui/automations-action-job-variable.png)
diff --git a/docs/3.0/deploy/infrastructure-concepts/deploy-ci-cd.mdx b/docs/v3/deploy/infrastructure-concepts/deploy-ci-cd.mdx
similarity index 87%
rename from docs/3.0/deploy/infrastructure-concepts/deploy-ci-cd.mdx
rename to docs/v3/deploy/infrastructure-concepts/deploy-ci-cd.mdx
index 771b50217888..b32c3bc96954 100644
--- a/docs/3.0/deploy/infrastructure-concepts/deploy-ci-cd.mdx
+++ b/docs/v3/deploy/infrastructure-concepts/deploy-ci-cd.mdx
@@ -11,7 +11,7 @@ search:
Many organizations deploy Prefect workflows through their CI/CD process.
Each organization has its own unique CI/CD setup, but a common pattern is to use CI/CD to manage
-Prefect [deployments](/3.0/deploy/infrastructure-examples/docker).
+Prefect [deployments](/v3/deploy/infrastructure-examples/docker).
Combining Prefect's deployment features with CI/CD tools enables efficient management of flow code
updates, scheduling changes, and container builds.
This guide uses [GitHub Actions](https://docs.github.com/en/actions) to implement a CI/CD process,
@@ -38,7 +38,7 @@ and `DOCKER_PASSWORD` as secrets to your repository as well.
Create secrets for GitHub Actions in your repository under
**Settings -> Secrets and variables -> Actions -> New repository secret**:
-![Creating a GitHub Actions secret](/3.0/img/guides/github-secrets.png)
+![Creating a GitHub Actions secret](/v3/img/guides/github-secrets.png)
### Write a GitHub workflow
@@ -68,11 +68,11 @@ For reference, the examples below live in their respective branches of
```
.
- ├── .github/
- │ └── workflows/
- │ └── deploy-prefect-flow.yaml
- ├── flow.py
- └── requirements.txt
+ |-- .github/
+ |   |-- workflows/
+ |   |   |-- deploy-prefect-flow.yaml
+ |-- flow.py
+ |-- requirements.txt
```
`flow.py`
@@ -120,7 +120,7 @@ For reference, the examples below live in their respective branches of
- name: Setup Python
uses: actions/setup-python@v5
with:
- python-version: "3.11"
+ python-version: "3.12"
- name: Prefect Deploy
env:
@@ -135,12 +135,12 @@ For reference, the examples below live in their respective branches of
```
.
- ├── .github/
- │ └── workflows/
- │ └── deploy-prefect-flow.yaml
- ├── flow.py
- ├── prefect.yaml
- └── requirements.txt
+ |-- .github/
+ |   |-- workflows/
+ |   |   |-- deploy-prefect-flow.yaml
+ |-- flow.py
+ |-- prefect.yaml
+ |-- requirements.txt
```
`flow.py`
@@ -213,7 +213,7 @@ For reference, the examples below live in their respective branches of
- name: Setup Python
uses: actions/setup-python@v5
with:
- python-version: "3.11"
+ python-version: "3.12"
- name: Prefect Deploy
env:
@@ -231,7 +231,7 @@ For reference, the examples below live in their respective branches of
After pushing commits to your repository, GitHub automatically triggers a run of your workflow.
Monitor the status of running and completed workflows from the **Actions** tab of your repository.
-![A GitHub Action triggered via push](/3.0/img/guides/github-actions-trigger.png)
+![A GitHub Action triggered via push](/v3/img/guides/github-actions-trigger.png)
View the logs from each workflow step as they run. The `Prefect Deploy` step includes output about
your image build and push, and the creation/update of your deployment.
@@ -244,11 +244,11 @@ Successfully pushed image '***/cicd-example:latest'
Successfully created/updated all deployments!
Deployments
-┏━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━┳━━━━━━━━━┓
-┃ Name ┃ Status ┃ Details ┃
-┡━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━╇━━━━━━━━━┩
-│ hello/my-deployment │ applied │ │
-└─────────────────────┴─────────┴─────────┘
+|---------------------|---------|---------|
+| Name                | Status  | Details |
+|---------------------|---------|---------|
+| hello/my-deployment | applied |         |
+|---------------------|---------|---------|
```
@@ -292,12 +292,12 @@ separated across projects and environments.
```
.
- ├── cicd-example-workspaces-prod # production bucket
- │ ├── project_1
- │ └── project_2
- └── cicd-example-workspaces-stg # staging bucket
- ├── project_1
- └── project_2
+ |--- cicd-example-workspaces-prod  # production bucket
+ |    |--- project_1
+ |    |--- project_2
+ |--- cicd-example-workspaces-stg   # staging bucket
+      |--- project_1
+      |--- project_2
```
Deployments in this example use S3 for code storage, so it's important that push steps place flow files
@@ -316,15 +316,10 @@ so Python packages do not have to be downloaded on repeat workflow runs.
- name: Setup Python
uses: actions/setup-python@v5
with:
- python-version: "3.11"
+ python-version: "3.12"
cache: "pip"
```
-```
-Using cached prefect-3.0.0-py3-none-any.whl (2.9 MB)
-Using cached prefect_aws-0.4.18-py3-none-any.whl (61 kB)
-```
-
The `build-push-action` for building Docker images also offers
[caching options for GitHub Actions](https://docs.docker.com/build/cache/backends/gha/).
If you are not using GitHub, other remote [cache backends](https://docs.docker.com/build/cache/backends/)
@@ -395,7 +390,7 @@ jobs:
- name: Setup Python
uses: actions/setup-python@v5
with:
- python-version: "3.11"
+ python-version: "3.12"
- name: Prefect Auth
uses: PrefectHQ/actions-prefect-auth@v1
@@ -404,7 +399,7 @@ jobs:
prefect-workspace: ${{ secrets.PREFECT_WORKSPACE }}
- name: Run Prefect Deploy
- uses: PrefectHQ/actions-prefect-deploy@v3
+ uses: PrefectHQ/actions-prefect-deploy@v4
with:
deployment-names: my-deployment
requirements-file-paths: requirements.txt
@@ -429,5 +424,4 @@ registry URL to the `registry` key in the `with:` part of the action and use an
## See also
-Check out the [Prefect Cloud Terraform provider](https://registry.terraform.io/providers/PrefectHQ/prefect/latest/docs/guides/getting-started)
-if you're using Terraform to manage your infrastructure.
+If you're using Terraform to manage your infrastructure, check out the [Prefect Cloud Terraform provider](https://registry.terraform.io/providers/PrefectHQ/prefect/latest/docs/guides/getting-started).
diff --git a/docs/3.0/deploy/infrastructure-concepts/deploy-via-python.mdx b/docs/v3/deploy/infrastructure-concepts/deploy-via-python.mdx
similarity index 94%
rename from docs/3.0/deploy/infrastructure-concepts/deploy-via-python.mdx
rename to docs/v3/deploy/infrastructure-concepts/deploy-via-python.mdx
index 3b0509d674c5..af1a410bb405 100644
--- a/docs/3.0/deploy/infrastructure-concepts/deploy-via-python.mdx
+++ b/docs/v3/deploy/infrastructure-concepts/deploy-via-python.mdx
@@ -31,13 +31,13 @@ Before deploying your flow using `flow.deploy`, ensure you have the following:
1. A running Prefect server or Prefect Cloud workspace: You can either run a Prefect server locally or use a Prefect Cloud workspace.
To start a local server, run `prefect server start`. To use Prefect Cloud, sign up for an account at [app.prefect.cloud](https://app.prefect.cloud)
-and follow the [Connect to Prefect Cloud](/3.0/manage/cloud/connect-to-cloud/) guide.
+and follow the [Connect to Prefect Cloud](/v3/manage/cloud/connect-to-cloud/) guide.
2. A Prefect flow: You should have a flow defined in your Python script. If you haven't created a flow yet, refer to the
-[Write Flows](/3.0/develop/write-flows/) guide.
+[Write Flows](/v3/develop/write-flows/) guide.
3. A work pool: You need a work pool to manage the infrastructure for running your flow. If you haven't created a work pool, you can do so through
-the Prefect UI or using the Prefect CLI. For more information, see the [Work Pools](/3.0/deploy/infrastructure-concepts/work-pools/) guide.
+the Prefect UI or using the Prefect CLI. For more information, see the [Work Pools](/v3/deploy/infrastructure-concepts/work-pools/) guide.
For examples in this guide, we'll use a Docker work pool created by running:
```bash
@@ -254,7 +254,7 @@ To deploy a flow with a schedule, you can use one of the following options:
```
- Learn more about schedules [here](/3.0/automate/add-schedules).
+ Learn more about schedules [here](/v3/automate/add-schedules).
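Putting one of these options together, a scheduled deployment looks roughly like this (a sketch; the work pool and image names are illustrative):

```python
from prefect import flow

@flow
def my_flow():
    print("Running on a schedule")

if __name__ == "__main__":
    # attach a cron schedule at deploy time: every day at 9 AM
    my_flow.deploy(
        name="scheduled-deployment",
        work_pool_name="my-docker-pool",
        image="my-registry/my-image:latest",
        cron="0 9 * * *",
    )
```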
## Use remote code storage
@@ -301,7 +301,7 @@ The `source` parameter can accept a variety of remote storage options including:
The `entrypoint` parameter is the path to the flow function within your repository combined with the name of the flow function.
-Learn more about remote code storage [here](/3.0/deploy/infrastructure-concepts/store-flow-code/).
+Learn more about remote code storage [here](/v3/deploy/infrastructure-concepts/store-flow-code/).
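A minimal sketch of the pattern (the repository URL, entrypoint, and work pool name are illustrative; a Docker work pool would additionally need an `image` argument):

```python
from prefect import flow

if __name__ == "__main__":
    # pull the flow code from a Git repository at runtime,
    # then create the deployment against an existing work pool
    flow.from_source(
        source="https://github.com/org/repo.git",
        entrypoint="flows/my_flow.py:my_flow",
    ).deploy(
        name="remote-storage-deployment",
        work_pool_name="my-work-pool",
    )
```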
## Set default parameters
@@ -359,7 +359,7 @@ if __name__ == "__main__":
Job variables can be used to customize environment variables, resource limits, and other infrastructure options, allowing fine-grained control over your infrastructure on a per-deployment or per-flow-run basis.
Any variable defined in the base job template of the associated work pool can be overridden by a job variable.
-You can learn more about job variables [here](/3.0/deploy/infrastructure-concepts/customize).
+You can learn more about job variables [here](/v3/deploy/infrastructure-concepts/customize).
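For instance, overriding an environment variable for a single deployment might look like this (a sketch; the names are illustrative):

```python
from prefect import flow

@flow
def my_flow():
    ...

if __name__ == "__main__":
    # override an "env" entry from the work pool's base job template
    my_flow.deploy(
        name="custom-env-deployment",
        work_pool_name="my-docker-pool",
        image="my-registry/my-image:latest",
        job_variables={"env": {"EXTRA_PIP_PACKAGES": "pandas"}},
    )
```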
## Deploy multiple flows
@@ -402,8 +402,8 @@ This approach offers the following benefits:
## Additional resources
-- [Work Pools](/3.0/deploy/infrastructure-concepts/work-pools/)
-- [Store Flow Code](/3.0/deploy/infrastructure-concepts/store-flow-code/)
-- [Customize Infrastructure](/3.0/deploy/infrastructure-concepts/customize/)
-- [Schedules](/3.0/automate/add-schedules/)
-- [Write Flows](/3.0/develop/write-flows/)
+- [Work Pools](/v3/deploy/infrastructure-concepts/work-pools/)
+- [Store Flow Code](/v3/deploy/infrastructure-concepts/store-flow-code/)
+- [Customize Infrastructure](/v3/deploy/infrastructure-concepts/customize/)
+- [Schedules](/v3/automate/add-schedules/)
+- [Write Flows](/v3/develop/write-flows/)
diff --git a/docs/3.0/deploy/infrastructure-concepts/prefect-yaml.mdx b/docs/v3/deploy/infrastructure-concepts/prefect-yaml.mdx
similarity index 95%
rename from docs/3.0/deploy/infrastructure-concepts/prefect-yaml.mdx
rename to docs/v3/deploy/infrastructure-concepts/prefect-yaml.mdx
index a1fec36ce38d..597382290956 100644
--- a/docs/3.0/deploy/infrastructure-concepts/prefect-yaml.mdx
+++ b/docs/v3/deploy/infrastructure-concepts/prefect-yaml.mdx
@@ -228,7 +228,7 @@ The bucket is populated with the provided value (which also could have been prov
`folder` property of the `push` step is a template; the `pull_from_s3` step outputs both a `bucket` value and a `folder`
value that can be used to template downstream steps. This helps you keep your steps consistent across edits.
-As discussed above, if you use [blocks](/3.0/develop/blocks/), you can template the credentials section with
+As discussed above, if you use [blocks](/v3/develop/blocks/), you can template the credentials section with
a block reference for secure and dynamic credentials access:
```yaml
@@ -370,10 +370,10 @@ Values that you place within your `prefect.yaml` file can reference dynamic valu
- **step outputs**: every step of both `build` and `push` produces named fields such as `image_name`; you can reference these
fields within `prefect.yaml` and `prefect deploy` will populate them with each call. References must be enclosed in double
brackets and in `"{{ field_name }}"` format
-- **blocks**: you can reference [Prefect blocks](/3.0/develop/blocks) with the
+- **blocks**: you can reference [Prefect blocks](/v3/develop/blocks) with the
`{{ prefect.blocks.block_type.block_slug }}` syntax. It is highly recommended that you use block references for any sensitive
information (such as a GitHub access token or any credentials) to avoid hardcoding these values in plaintext
-- **variables**: you can reference [Prefect variables](/3.0/develop/variables) with the
+- **variables**: you can reference [Prefect variables](/v3/develop/variables) with the
`{{ prefect.variables.variable_name }}` syntax. Use variables to reference non-sensitive, reusable pieces of information
such as a default image name or a default work pool name.
- **environment variables**: you can also reference environment variables with the special syntax `{{ $MY_ENV_VAR }}`.
@@ -620,9 +620,9 @@ These are fields you can add to each deployment declaration.
| `version` | An optional version for the deployment. |
| `tags` | A list of strings to assign to the deployment as tags. |
| `description` | An optional description for the deployment. |
-| `schedule` | An optional [schedule](/3.0/automate/add-schedules) to assign to the deployment. Fields for this section are documented in the [Schedule Fields](#schedule-fields) section. |
-| `concurrency_limit` | An optional [deployment concurrency limit](/3.0/deploy/index#concurrency-limiting). Fields for this section are documented in the [Concurrency Limit Fields](#concurrency-limit-fields) section. |
-| `triggers` | An optional array of [triggers](/3.0/automate/events/automations-triggers/#custom-triggers) to assign to the deployment |
+| `schedule` | An optional [schedule](/v3/automate/add-schedules) to assign to the deployment. Fields for this section are documented in the [Schedule Fields](#schedule-fields) section. |
+| `concurrency_limit` | An optional [deployment concurrency limit](/v3/deploy/index#concurrency-limiting). Fields for this section are documented in the [Concurrency Limit Fields](#concurrency-limit-fields) section. |
+| `triggers` | An optional array of [triggers](/v3/automate/events/automations-triggers/#custom-triggers) to assign to the deployment |
| `entrypoint` | Required path to the `.py` file containing the flow you want to deploy (relative to the root directory of your development folder) combined with the name of the flow function. In the format `path/to/file.py:flow_function_name`. |
| `parameters` | Optional default values to provide for the parameters of the deployed flow. Should be an object with key/value pairs. |
| `enforce_parameter_schema` | Boolean flag that determines whether the API should validate the parameters passed to a flow run against the parameter schema generated for the deployed flow. |
@@ -659,7 +659,7 @@ These are fields you can add to a deployment declaration's `work_pool` section.
| ---------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `name` | The name of the work pool to schedule flow runs in for the deployment. |
| `work_queue_name` | The name of the work queue within the specified work pool to schedule flow runs in for the deployment. If not provided, the default queue for the specified work pool is used. |
-| `job_variables` | Values used to override the default values in the specified work pool's [base job template](/3.0/deploy/infrastructure-concepts/work-pools/#base-job-template). Maps directly to a created deployments `infra_overrides` attribute. |
+| `job_variables` | Values used to override the default values in the specified work pool's [base job template](/v3/deploy/infrastructure-concepts/work-pools/#base-job-template). Maps directly to a created deployment's `infra_overrides` attribute. |
#### Deployment mechanics
@@ -685,11 +685,15 @@ Each time a step runs, the following actions take place in order:
- The step's function is called with the resolved inputs.
- The step's output is returned and used to resolve inputs for subsequent steps.
+## Update a deployment
+
+To update a deployment, make any desired changes to the `prefect.yaml` file and run `prefect deploy`. Running this command alone prompts you to select a deployment interactively; you can also specify the deployment to update with `--name your-deployment`.
+
## Next steps
Now that you are familiar with creating deployments, you can explore infrastructure options for running your deployments:
-- [Managed work pools](/3.0/deploy/infrastructure-examples/managed/)
-- [Push work pools](/3.0/deploy/infrastructure-examples/serverless/)
-- [Kubernetes work pools](/3.0/deploy/infrastructure-examples/kubernetes/)
-- [Serverless hybrid work pools](/3.0/deploy/infrastructure-examples/docker/)
+- [Managed work pools](/v3/deploy/infrastructure-examples/managed/)
+- [Push work pools](/v3/deploy/infrastructure-examples/serverless/)
+- [Kubernetes work pools](/v3/deploy/infrastructure-examples/kubernetes/)
+- [Serverless hybrid work pools](/v3/deploy/infrastructure-examples/docker/)
diff --git a/docs/3.0/deploy/infrastructure-concepts/store-flow-code.mdx b/docs/v3/deploy/infrastructure-concepts/store-flow-code.mdx
similarity index 97%
rename from docs/3.0/deploy/infrastructure-concepts/store-flow-code.mdx
rename to docs/v3/deploy/infrastructure-concepts/store-flow-code.mdx
index ce8175beef0a..d7295942a197 100644
--- a/docs/3.0/deploy/infrastructure-concepts/store-flow-code.mdx
+++ b/docs/v3/deploy/infrastructure-concepts/store-flow-code.mdx
@@ -18,7 +18,7 @@ Local storage is also an option for deployments that run locally.
In the examples below, we show how to create a work pool-based deployment for each of these storage options.
## Deployment creation options
-You can create a deployment through [Python code with the `flow.deploy` method](/3.0/deploy/infrastructure-concepts/deploy-via-python) or through a [YAML specification defined in a `prefect.yaml` file](/3.0/deploy/infrastructure-concepts/prefect-yaml).
+You can create a deployment through [Python code with the `flow.deploy` method](/v3/deploy/infrastructure-concepts/deploy-via-python) or through a [YAML specification defined in a `prefect.yaml` file](/v3/deploy/infrastructure-concepts/prefect-yaml).
When using the Python `deploy` method, specifying a flow storage location other than a Docker image requires the `flow.from_source` method. The `source` and `entrypoint` arguments are required.
@@ -422,7 +422,7 @@ Alternatively, you can create a custom Docker image outside of Prefect.
If you do this, you don't need push or pull steps in the `prefect.yaml` file.
Instead, the work pool can reference the image directly.
-For more information, see [this discussion of custom Docker images](/3.0/deploy/infrastructure-examples/docker/#automatically-build-a-custom-docker-image-with-a-local-dockerfile).
+For more information, see [this discussion of custom Docker images](/v3/deploy/infrastructure-examples/docker/#automatically-build-a-custom-docker-image-with-a-local-dockerfile).
## Cloud-provider storage
@@ -734,7 +734,7 @@ We also include Python code that shows how to use an existing storage block and
-Another authentication option is to give the [worker](/3.0/deploy/infrastructure-concepts/workers/) access to the storage location at runtime through SSH keys.
+Another authentication option is to give the [worker](/v3/deploy/infrastructure-concepts/workers/) access to the storage location at runtime through SSH keys.
## Store code locally
@@ -819,4 +819,4 @@ The deployment creation mechanics for `serve` are similar to `deploy`.
Unlike `serve`, if you don't specify an image to use for your flow, you must specify where to pull the flow code from at runtime with the `from_source` method; `from_source` is optional with `serve`.
-Read more about when to consider using `serve` [here](/3.0/deploy/infrastructure-concepts/deploy-via-python#when-to-consider-flow-deploy-over-flow-serve).
+Read more about when to consider using `serve` [here](/v3/deploy/infrastructure-concepts/deploy-via-python#when-to-consider-flow-deploy-over-flow-serve).
diff --git a/docs/3.0/deploy/infrastructure-concepts/work-pools.mdx b/docs/v3/deploy/infrastructure-concepts/work-pools.mdx
similarity index 90%
rename from docs/3.0/deploy/infrastructure-concepts/work-pools.mdx
rename to docs/v3/deploy/infrastructure-concepts/work-pools.mdx
index 45bb3ab800ed..bdd80bd46e2a 100644
--- a/docs/3.0/deploy/infrastructure-concepts/work-pools.mdx
+++ b/docs/v3/deploy/infrastructure-concepts/work-pools.mdx
@@ -13,7 +13,7 @@ Other advantages of work pools:
- Configure default infrastructure configurations on your work pools that all jobs inherit and can override.
- Allow platform teams to use work pools to expose opinionated (and enforced) interfaces to the infrastructure that they oversee.
-- Allow work pools to prioritize (or limit) flow runs through the use of [work queues](/3.0/deploy/infrastructure-concepts/work-pools/#work-queues).
+- Allow work pools to prioritize (or limit) flow runs through the use of [work queues](/v3/deploy/infrastructure-concepts/work-pools/#work-queues).
**Choosing Between `flow.deploy()` and `flow.serve()`**
@@ -27,8 +27,8 @@ Other advantages of work pools:
Work pools have different operational modes, each designed to work with specific infrastructures and work delivery methods:
1. Pull work pools: These require workers to actively poll for flow runs to execute.
-2. [Push work pools](/3.0/deploy/infrastructure-examples/serverless): These submit runs directly to serverless infrastructure providers.
-3. [Managed work pools](/3.0/deploy/infrastructure-examples/managed): These are administered by Prefect and handle both submission and execution of code.
+2. [Push work pools](/v3/deploy/infrastructure-examples/serverless): These submit runs directly to serverless infrastructure providers.
+3. [Managed work pools](/v3/deploy/infrastructure-examples/managed): These are administered by Prefect and handle both submission and execution of code.
Each type of work pool is optimized for different use cases, allowing you to choose the best fit for your specific infrastructure and workflow requirements.
By using work pools, you can efficiently manage the distribution and execution of your Prefect flows across environments and infrastructures.
@@ -51,38 +51,36 @@ deployment that is polled by a worker and executes a flow run based on that depl
```mermaid
%%{
init: {
- 'theme': 'base',
+ 'theme': 'neutral',
'themeVariables': {
- 'fontSize': '19px'
+ 'margin': '10px'
}
}
}%%
flowchart LR
- F("Flow Code
"):::yellow -.-> A("Deployment Definition
"):::gold
- subgraph Server ["Prefect API
"]
- D("Deployment
"):::green
+
+ B(Deployment Definition)
+
+ subgraph Server [Prefect API]
+ C(Deployment)
end
- subgraph Remote Storage ["Remote Storage
"]
- B("Flow
"):::yellow
+
+ subgraph Remote Storage [Remote Storage]
+ D(Flow Code)
end
- subgraph Infrastructure ["Infrastructure
"]
- G("Flow Run
"):::blue
+
+ E(Worker)
+
+ subgraph Infrastructure [Infrastructure]
+ F((Flow Run))
end
- A --> D
- D --> E("Worker
"):::red
- B -.-> E
- A -.-> B
- E -.-> G
-
- classDef gold fill:goldenrod,stroke:goldenrod,stroke-width:4px,color:black
- classDef yellow fill:gold,stroke:gold,stroke-width:4px,color:black
- classDef gray fill:lightgray,stroke:lightgray,stroke-width:4px
- classDef blue fill:blue,stroke:blue,stroke-width:4px,color:white
- classDef green fill:green,stroke:green,stroke-width:4px,color:white
- classDef red fill:red,stroke:red,stroke-width:4px,color:white
- classDef dkgray fill:darkgray,stroke:darkgray,stroke-width:4px,color:white
+ B --> C
+ B -.-> D
+ C --> E
+ D -.-> E
+ E -.-> F
```
### Work pool configuration
@@ -91,12 +89,12 @@ You can configure work pools by using any of the following:
- Prefect UI
- Prefect CLI commands
-- [Prefect REST API](/3.0/api-ref/rest-api/)
+- [Prefect REST API](/v3/api-ref/rest-api/)
- [Terraform provider for Prefect Cloud](https://registry.terraform.io/providers/PrefectHQ/prefect/latest/docs/resources/work_pool)
To manage work pools in the UI, click the **Work Pools** icon. This displays a list of currently configured work pools.
-![The UI displays a list of configured work pools](/3.0/img/ui/work-pool-list.png)
+![The UI displays a list of configured work pools](/v3/img/ui/work-pool-list.png)
Select the **+** button to create a new work pool. You can specify the details about infrastructure created for this work pool.
@@ -380,7 +378,7 @@ Each work pool type is configured with a default base job template, which is a g
The default base template defines values that pass to every flow run, but you can override them on a per-deployment or per-flow run basis.
For example, these configuration options are available in the Prefect UI for `process` work pool:
-![process work pool configuration options](/3.0/img/ui/process-work-pool-config.png)
+![process work pool configuration options](/v3/img/ui/process-work-pool-config.png)
The default base job template allows you to:
- set environment variables for spawned processes
@@ -409,7 +407,7 @@ deployments:
stream_output: false
```
-Learn more about [customizing job variables](/3.0/deploy/infrastructure-concepts/customize).
+Learn more about [customizing job variables](/v3/deploy/infrastructure-concepts/customize).
**Advanced customization of the base job template**
@@ -423,7 +421,7 @@ Learn more about [customizing job variables](/3.0/deploy/infrastructure-concepts
capabilities in secure environments. For example, the `kubernetes` worker type allows users to specify a custom job template
to configure the manifest that workers use to create jobs for flow execution.
- See more information about [overriding a work pool's job variables](/3.0/deploy/infrastructure-concepts/customize).
+ See more information about [overriding a work pool's job variables](/v3/deploy/infrastructure-concepts/customize).
### Work queues
@@ -478,10 +476,10 @@ in waterfall fashion.
## Next steps
-- Learn more about [workers](/3.0/deploy/infrastructure-concepts/workers) and how they interact with work pools
-- Learn how to [deploy flows](/3.0/deploy/infrastructure-concepts/prefect-yaml) that run in work pools
+- Learn more about [workers](/v3/deploy/infrastructure-concepts/workers) and how they interact with work pools
+- Learn how to [deploy flows](/v3/deploy/infrastructure-concepts/prefect-yaml) that run in work pools
- Learn how to set up work pools for:
- - [Kubernetes](/3.0/deploy/infrastructure-examples/kubernetes)
- - [Docker](/3.0/deploy/infrastructure-examples/docker)
- - [Serverless platforms](/3.0/deploy/infrastructure-examples/serverless)
- - [Infrastructure managed by Prefect Cloud](/3.0/deploy/infrastructure-examples/managed)
+ - [Kubernetes](/v3/deploy/infrastructure-examples/kubernetes)
+ - [Docker](/v3/deploy/infrastructure-examples/docker)
+ - [Serverless platforms](/v3/deploy/infrastructure-examples/serverless)
+ - [Infrastructure managed by Prefect Cloud](/v3/deploy/infrastructure-examples/managed)
diff --git a/docs/3.0/deploy/infrastructure-concepts/workers.mdx b/docs/v3/deploy/infrastructure-concepts/workers.mdx
similarity index 85%
rename from docs/3.0/deploy/infrastructure-concepts/workers.mdx
rename to docs/v3/deploy/infrastructure-concepts/workers.mdx
index ee0bfe45f5ca..874c0521bb47 100644
--- a/docs/3.0/deploy/infrastructure-concepts/workers.mdx
+++ b/docs/v3/deploy/infrastructure-concepts/workers.mdx
@@ -12,15 +12,24 @@ As a result, when deployments are assigned to a work pool, you know in which exe
The following diagram summarizes the architecture of a worker-based work pool deployment:
```mermaid
-graph TD
+%%{
+ init: {
+ 'theme': 'neutral',
+ 'themeVariables': {
+ 'margin': '10px'
+ }
+ }
+}%%
+
+flowchart TD
subgraph your_infra["Your Execution Environment"]
worker["Worker"]
- subgraph flow_run_infra[Flow Run Infra]
- flow_run_a(("Flow Run A"))
- end
- subgraph flow_run_infra_2[Flow Run Infra]
- flow_run_b(("Flow Run B"))
- end
+ subgraph flow_run_infra[Infrastructure]
+ flow_run_a(("Flow Run A"))
+ end
+ subgraph flow_run_infra_2[Infrastructure]
+ flow_run_b(("Flow Run B"))
+ end
end
subgraph api["Prefect API"]
@@ -33,9 +42,7 @@ graph TD
worker --> |creates| flow_run_infra_2
```
-
-Notice above that the worker is in charge of provisioning the _flow run infrastructure_.
-
+The worker is in charge of provisioning the _flow run infrastructure_.
### Worker types
@@ -95,6 +102,23 @@ Workers have two statuses: `ONLINE` and `OFFLINE`. A worker is online if it send
If a worker misses three heartbeats, it is considered offline. By default, a worker is considered offline a maximum of 90 seconds
after it stopped sending heartbeats, but you can configure the threshold with the `PREFECT_WORKER_HEARTBEAT_SECONDS` setting.
+### Worker logs
+Workers send logs to the Prefect Cloud API if you're connected to Prefect Cloud.
+
+- All worker logs are automatically sent to the Prefect Cloud API
+- Logs are accessible through both the Prefect Cloud UI and API
+- Each flow run will include a link to its associated worker's logs
+
+### Worker details
+The **Worker Details** page shows you the following information:
+
+- Worker status
+- Installed Prefect version
+- Installed Prefect integrations (e.g., `prefect-aws`, `prefect-gcp`)
+- Live worker logs (if worker logging is enabled)
+
+Access a worker's details by clicking on the worker's name in the Work Pool list.
+
### Start a worker
Use the `prefect worker start` CLI command to start a worker. You must pass at least the work pool name.
@@ -138,10 +162,10 @@ prefect worker start --pool "my-pool" --limit 5
### Configure prefetch
-By default, the worker submits flow runs a short time (10 seconds) before they are scheduled to run.
+By default, the worker submits flow runs 10 seconds before they are scheduled to run.
This allows time for the infrastructure to be created so the flow run can start on time.
-In some cases, infrastructure takes longer than 10 seconds to start the flow run. You can increase the prefetch with the
+In some cases, infrastructure takes longer than 10 seconds to start the flow run. You can increase the prefetch time with the
`--prefetch-seconds` option or the `PREFECT_WORKER_PREFETCH_SECONDS` setting.
If this value is _more_ than the amount of time it takes for the infrastructure to start, the flow run will _wait_ until its
@@ -149,7 +173,7 @@ scheduled start time.
### Polling for work
-Workers poll for work every 15 seconds by default. You can configure this interval in your [profile settings](/3.0/manage/settings-and-profiles/)
+Workers poll for work every 15 seconds by default. You can configure this interval in your [profile settings](/v3/develop/settings-and-profiles/)
with the
`PREFECT_WORKER_QUERY_SECONDS` setting.
@@ -168,8 +192,9 @@ If `prefect worker start` is run non-interactively, the `prompt` install policy
### Additional resources
-See how to [daemonize a Prefect worker](/3.0/resources/daemonize-processes/).
+See how to [daemonize a Prefect worker](/v3/resources/daemonize-processes/).
+
+See more information on [overriding a work pool's job variables](/v3/deploy/infrastructure-concepts/customize).
-See more information on [overriding a work pool's job variables](/3.0/deploy/infrastructure-concepts/customize).
---------
diff --git a/docs/3.0/deploy/infrastructure-examples/docker.mdx b/docs/v3/deploy/infrastructure-examples/docker.mdx
similarity index 86%
rename from docs/3.0/deploy/infrastructure-examples/docker.mdx
rename to docs/v3/deploy/infrastructure-examples/docker.mdx
index e422f98a99e3..052bf4dfd590 100644
--- a/docs/3.0/deploy/infrastructure-examples/docker.mdx
+++ b/docs/v3/deploy/infrastructure-examples/docker.mdx
@@ -3,9 +3,24 @@ title: Run flows in Docker containers
description: Learn how to deploy a flow to a Docker work pool with workers
---
-In this example, you will create a work pool and worker to deploy your flow, and then execute it with the Prefect API.
+In this example, you will set up:
+
+- a Docker [**work pool**](/v3/deploy/infrastructure-concepts/work-pools/): stores the infrastructure configuration for your deployment
+- a Docker [**worker**](/v3/deploy/infrastructure-concepts/workers/): process that polls the Prefect API for flow runs to execute as Docker containers
+- a [**deployment**](/v3/deploy/index/): a flow that should run according to the configuration on your Docker work pool
+
+Then you can execute your deployment via the Prefect API (through the SDK, CLI, UI, etc.).
+
You must have [Docker](https://docs.docker.com/engine/install/) installed and running on your machine.
+
+**Executing flows in a long-lived container**
+
+This guide shows how to run a flow in an ephemeral container that is removed after the flow run completes.
+To instead learn how to run flows in a static, long-lived container, see [this](/v3/deploy/static-infrastructure-examples/docker/) guide.
+
### Create a work pool
A work pool provides default infrastructure configurations that all jobs inherit and can override.
@@ -17,7 +32,7 @@ To set up a **Docker** type work pool with the default values, run:
prefect work-pool create --type docker my-docker-pool
```
-Or create the work pool in the UI.
+... or create the work pool in the UI.
To confirm the work pool creation was successful, run:
@@ -50,9 +65,19 @@ It's now polling the Prefect API to check for any scheduled flow runs it should
You'll see your new worker listed in the UI under the **Workers** tab of the Work Pools page with a recent last polled date.
The work pool should have a `Ready` status indicator.
+
+**Pro Tip:**
+
+If `my-docker-pool` does not already exist, the command below will create it for you automatically with the default settings for that work pool type, in this case `docker`.
+
+```bash
+prefect worker start --pool my-docker-pool --type docker
+```
+
+
Keep this terminal session active for the worker to continue to pick up jobs.
Since you are running this worker locally, the worker will stop if you close the terminal.
-In a production setting this worker should run as a [daemonized or managed process](/3.0/resources/daemonize-processes/).
+In a production setting this worker should run as a [daemonized or managed process](/v3/resources/daemonize-processes/).
## Create the deployment
@@ -67,7 +92,7 @@ Next, you'll create a deployment from your flow code.
Create a deployment from Python code by calling the `.deploy` method on a flow:
-```python buy.py
+```python deploy_buy.py
from prefect import flow
@flow(log_prints=True)
@@ -85,7 +110,7 @@ if __name__ == "__main__":
Now, run the script to create a deployment (in future examples this step is omitted for brevity):
```bash
-python buy.py
+python deploy_buy.py
```
You should see messages in your terminal that Docker is building your image.
@@ -129,7 +154,7 @@ if __name__ == "__main__":
buy.deploy(
name="my-code-baked-into-an-image-deployment",
work_pool_name="my-docker-pool",
- image="my_registry/no-build-image:1.0",
+ image="my_registry/already-built-image:1.0",
build=False
)
```
@@ -212,7 +237,7 @@ For example, you can install a private Python package from GCP's artifact regist
)
```
-Note that you used a [Prefect Secret block](/3.0/develop/blocks/) to load the URL configuration for
+Note that you used a [Prefect Secret block](/v3/develop/blocks/) to load the URL configuration for
the artifact registry above.
See all the optional keyword arguments for the [DockerImage class](https://docker-py.readthedocs.io/en/stable/images.html#docker.models.images.ImageCollection.build).
@@ -270,7 +295,7 @@ if __name__ == "__main__":
The `entrypoint` is the path to the file the flow is located in and the function name, separated by a colon.
-See the [Store flow code](/3.0/deploy/infrastructure-concepts/store-flow-code/) guide for more flow code storage options.
+See the [Store flow code](/v3/deploy/infrastructure-concepts/store-flow-code/) guide for more flow code storage options.
### Additional configuration with `.deploy`
@@ -299,7 +324,7 @@ if __name__ == "__main__":
The `job_variables` parameter allows you to fine-tune the infrastructure settings for a deployment.
The values passed in override default values in the specified work pool's
-[base job template](/3.0/deploy/infrastructure-concepts/work-pools/#base-job-template).
+[base job template](/v3/deploy/infrastructure-concepts/work-pools/#base-job-template).
You can override environment variables, such as `image_pull_policy` and `image`, for a specific deployment with the `job_variables`
argument.
@@ -332,7 +357,7 @@ The dictionary key "EXTRA_PIP_PACKAGES" denotes a special environment variable t
Python packages at runtime.
This approach is an alternative to building an image with a custom `requirements.txt` copied into it.
-See [Override work pool job variables](/3.0/deploy/infrastructure-concepts/customize) for more information about how to customize these variables.
+See [Override work pool job variables](/v3/deploy/infrastructure-concepts/customize) for more information about how to customize these variables.
### Work with multiple deployments with `deploy`
@@ -420,6 +445,6 @@ This is useful if using a monorepo approach to your workflows.
## Learn more
-- [Deploy flows on Kubernetes](/3.0/deploy/infrastructure-examples/kubernetes/)
-- [Deploy flows on serverless infrastructure](/3.0/deploy/infrastructure-examples/serverless/)
-- [Daemonize workers](/3.0/resources/daemonize-processes/)
+- [Deploy flows on Kubernetes](/v3/deploy/infrastructure-examples/kubernetes/)
+- [Deploy flows on serverless infrastructure](/v3/deploy/infrastructure-examples/serverless/)
+- [Daemonize workers](/v3/resources/daemonize-processes/)
diff --git a/docs/3.0/deploy/infrastructure-examples/kubernetes.mdx b/docs/v3/deploy/infrastructure-examples/kubernetes.mdx
similarity index 98%
rename from docs/3.0/deploy/infrastructure-examples/kubernetes.mdx
rename to docs/v3/deploy/infrastructure-examples/kubernetes.mdx
index b2cf6145c367..a004d84a4fb9 100644
--- a/docs/3.0/deploy/infrastructure-examples/kubernetes.mdx
+++ b/docs/v3/deploy/infrastructure-examples/kubernetes.mdx
@@ -12,7 +12,7 @@ Kubernetes 1.26.0 and newer minor versions.
1. A Prefect Cloud account
2. A cloud provider (AWS, GCP, or Azure) account
-3. Python and Prefect [installed](/3.0/get-started/install/)
+3. Python and Prefect [installed](/v3/get-started/install/)
4. Helm [installed](https://helm.sh/docs/intro/install/)
5. Kubernetes CLI (kubectl) [installed](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
6. Admin access for Prefect Cloud and your cloud provider. You can downgrade it after this setup.
@@ -183,7 +183,7 @@ If you already have a registry, skip ahead to the next section.
## Create a Kubernetes work pool
-[Work pools](/3.0/deploy/infrastructure-concepts/work-pools/) allow you to manage deployment
+[Work pools](/v3/deploy/infrastructure-concepts/work-pools/) allow you to manage deployment
infrastructure.
This section shows you how to configure the default values for your
Kubernetes base job template.
@@ -415,9 +415,9 @@ The `prefect.yaml` file currently allows for more customization in terms of push
steps.
To learn about the Python deployment creation method with `flow.deploy` see
-[Workers](/3.0/deploy/infrastructure-examples/docker/).
+[Workers](/v3/deploy/infrastructure-examples/docker/).
-The [`prefect.yaml`](/3.0/deploy/infrastructure-concepts/prefect-yaml/#managing-deployments) file is used
+The [`prefect.yaml`](/v3/deploy/infrastructure-concepts/prefect-yaml/#managing-deployments) file is used
by the `prefect deploy` command to deploy your flows.
As a part of that process it also builds and pushes your image.
Create a new file named `prefect.yaml` with the following contents:
diff --git a/docs/3.0/deploy/infrastructure-examples/managed.mdx b/docs/v3/deploy/infrastructure-examples/managed.mdx
similarity index 91%
rename from docs/3.0/deploy/infrastructure-examples/managed.mdx
rename to docs/v3/deploy/infrastructure-examples/managed.mdx
index 479ac1971d95..ff70b7030630 100644
--- a/docs/3.0/deploy/infrastructure-examples/managed.mdx
+++ b/docs/v3/deploy/infrastructure-examples/managed.mdx
@@ -35,7 +35,7 @@ Managed execution is a great option for users who want to get started quickly, w
)
```
-1. With your [CLI authenticated to your Prefect Cloud workspace](/3.0/manage/cloud/manage-users/api-keys/), run the script to create your deployment:
+1. With your [CLI authenticated to your Prefect Cloud workspace](/v3/manage/cloud/manage-users/api-keys/), run the script to create your deployment:
```bash
python managed-execution.py
@@ -65,7 +65,7 @@ if __name__ == "__main__":
)
```
-Alternatively, you can create a `requirements.txt` file and reference it in your [prefect.yaml pull step](/3.0/deploy/infrastructure-concepts/prefect-yaml#utility-steps).
+Alternatively, you can create a `requirements.txt` file and reference it in your [prefect.yaml pull step](/v3/deploy/infrastructure-concepts/prefect-yaml#utility-steps).
## Limitations
@@ -106,8 +106,8 @@ You can view your compute hours quota usage on the **Work Pools** page in the UI
## Next steps
-Read more about creating deployments in [Run flows in Docker containers](/3.0/deploy/infrastructure-examples/docker/).
+Read more about creating deployments in [Run flows in Docker containers](/v3/deploy/infrastructure-examples/docker/).
For more control over your infrastructure, such as the ability to run
-custom Docker images, [serverless push work pools](/3.0/deploy/infrastructure-examples/serverless/)
+custom Docker images, [serverless push work pools](/v3/deploy/infrastructure-examples/serverless/)
are also a good option.
\ No newline at end of file
diff --git a/docs/3.0/deploy/infrastructure-examples/serverless.mdx b/docs/v3/deploy/infrastructure-examples/serverless.mdx
similarity index 97%
rename from docs/3.0/deploy/infrastructure-examples/serverless.mdx
rename to docs/v3/deploy/infrastructure-examples/serverless.mdx
index 2d70b6220bdb..3adcf30ff1d8 100644
--- a/docs/3.0/deploy/infrastructure-examples/serverless.mdx
+++ b/docs/v3/deploy/infrastructure-examples/serverless.mdx
@@ -3,7 +3,7 @@ title: Run flows on serverless compute
description: Learn how to use Prefect push work pools to schedule work on serverless infrastructure without having to run a worker.
---
- Push [work pools](/3.0/deploy/infrastructure-concepts/work-pools/) are a special type of work pool. They allow
+ Push [work pools](/v3/deploy/infrastructure-concepts/work-pools/) are a special type of work pool. They allow
Prefect Cloud to submit flow runs for execution to serverless computing infrastructure without requiring you to run a worker.
Push work pools currently support execution in AWS ECS tasks, Azure Container Instances, Google Cloud Run jobs, and Modal.
@@ -553,7 +553,7 @@ In the examples below, you'll create a push work pool through the Prefect Cloud
The service account must have two roles at a minimum: *Cloud Run Developer* and *Service Account User*.
- ![Configuring service account permissions in GCP](/3.0/img/guides/gcr-service-account-setup.png)
+ ![Configuring service account permissions in GCP](/v3/img/guides/gcr-service-account-setup.png)
Once you create the Service account, navigate to its *Keys* page to add an API key. Create a JSON type key, download it,
and store it somewhere safe for use in the next section.
@@ -603,7 +603,7 @@ integrate securely with your serverless infrastructure, you need to store your c
For use in a push work pool, this block must have the contents of the JSON key stored in the
Service Account Info field, as such:
- ![Configuring GCP Credentials block for use in cloud run push work pools](/3.0/img/guides/gcp-creds-block-setup.png)
+ ![Configuring GCP Credentials block for use in cloud run push work pools](/v3/img/guides/gcp-creds-block-setup.png)
Provide any other optional information and create your block.
@@ -624,7 +624,7 @@ Click **Create** to configure your push work pool by selecting a push option in
Each step has several optional fields that are detailed in the
- [work pools documentation](/3.0/deploy/infrastructure-concepts/work-pools/).
+ [work pools documentation](/v3/deploy/infrastructure-concepts/work-pools/).
Select the block you created under the AWS Credentials field.
This allows Prefect Cloud to securely interact with your ECS cluster.
@@ -636,14 +636,14 @@ Click **Create** to configure your push work pool by selecting a push option in
Each step has several optional fields that are detailed in
- [Manage infrastructure with work pools](/3.0/deploy/infrastructure-concepts/work-pools/).
+ [Manage infrastructure with work pools](/v3/deploy/infrastructure-concepts/work-pools/).
Select the block you created under the GCP Credentials field.
This allows Prefect Cloud to securely interact with your GCP project.
Each step has several optional fields that are detailed in the
- [work pools documentation](/3.0/deploy/infrastructure-concepts/work-pools/).
+ [work pools documentation](/v3/deploy/infrastructure-concepts/work-pools/).
Select the block you created under the Modal Credentials field.
This allows Prefect Cloud to securely interact with your Modal account.
@@ -652,7 +652,7 @@ Create your pool to be ready to deploy flows to your Push work pool.
## Deployment
-You need to configure your [deployment](/3.0/deploy/infrastructure-examples/docker/) to send flow runs to your push work pool.
+You need to configure your [deployment](/v3/deploy/infrastructure-examples/docker/) to send flow runs to your push work pool.
For example, if you create a deployment through the interactive command line experience,
choose the work pool you just created. If you are deploying an existing `prefect.yaml` file, the deployment would contain:
@@ -680,6 +680,6 @@ to your serverless infrastructure, created a job, ran the job, and reported on i
## Next steps
-Learn more about [work pools](/3.0/deploy/infrastructure-concepts/work-pools/) and [workers](/3.0/deploy/infrastructure-concepts/workers/).
+Learn more about [work pools](/v3/deploy/infrastructure-concepts/work-pools/) and [workers](/v3/deploy/infrastructure-concepts/workers/).
-Learn about installing dependencies at runtime or baking them into your Docker image in the [Deploy to Docker](/3.0/deploy/infrastructure-examples/docker#automatically-build-a-custom-docker-image-with-a-local-dockerfile) guide.
+Learn about installing dependencies at runtime or baking them into your Docker image in the [Deploy to Docker](/v3/deploy/infrastructure-examples/docker#automatically-build-a-custom-docker-image-with-a-local-dockerfile) guide.
diff --git a/docs/3.0/deploy/run-flows-in-local-processes.mdx b/docs/v3/deploy/run-flows-in-local-processes.mdx
similarity index 94%
rename from docs/3.0/deploy/run-flows-in-local-processes.mdx
rename to docs/v3/deploy/run-flows-in-local-processes.mdx
index 5df76008119d..8c1fa5037f9e 100644
--- a/docs/3.0/deploy/run-flows-in-local-processes.mdx
+++ b/docs/v3/deploy/run-flows-in-local-processes.mdx
@@ -3,7 +3,7 @@ title: Run flows in local processes
description: Create a deployment for a flow by calling the `serve` method.
---
-The simplest way to create a [deployment](/3.0/deploy/infrastructure-examples/docker/) for your flow is by calling its `serve` method.
+The simplest way to create a [deployment](/v3/deploy/infrastructure-examples/docker/) for your flow is by calling its `serve` method.
## Serve a flow
@@ -68,7 +68,7 @@ The `serve` method on flows exposes many options for the deployment.
Here's how to use some of those options:
- `cron`: a keyword that allows you to set a cron string schedule for the deployment; see
-[schedules](/3.0/automate/add-schedules/) for more advanced scheduling options
+[schedules](/v3/automate/add-schedules/) for more advanced scheduling options
- `tags`: a keyword that allows you to tag this deployment and its runs for bookkeeping and filtering purposes
- `description`: a keyword that allows you to document what this deployment does; by default the
description is set from the docstring of the flow function (if documented)
@@ -180,7 +180,7 @@ if __name__ == "__main__":
A flow entrypoint is the path to the file where the flow is located, and the name of the flow function separated by a colon.
-For more ways to store and access flow code, see the [Retrieve code from storage page](/3.0/deploy/infrastructure-concepts/store-flow-code).
+For more ways to store and access flow code, see the [Retrieve code from storage page](/v3/deploy/infrastructure-concepts/store-flow-code).
**You can serve loaded flows**
@@ -214,4 +214,4 @@ Reasons to create a work-pool based deployment include:
Work pools are popular with data platform teams because they allow you to manage infrastructure configuration across an organization.
-Learn more about work-pool based deployments on the [Configure dynamic infrastructure with work pools page](/3.0/deploy/infrastructure-concepts/work-pools)
\ No newline at end of file
+Learn more about work-pool based deployments on the [Configure dynamic infrastructure with work pools page](/v3/deploy/infrastructure-concepts/work-pools)
\ No newline at end of file
diff --git a/docs/v3/deploy/static-infrastructure-examples/docker.mdx b/docs/v3/deploy/static-infrastructure-examples/docker.mdx
new file mode 100644
index 000000000000..face624637cd
--- /dev/null
+++ b/docs/v3/deploy/static-infrastructure-examples/docker.mdx
@@ -0,0 +1,187 @@
+---
+title: Serve flows in a long-lived Docker container
+description: Learn how to serve a flow in a long-lived Docker container
+---
+
+The `.serve` method allows you to easily elevate a flow to a deployment, listening for scheduled work to execute [as a local process](/v3/deploy/run-flows-in-local-processes).
+
+However, this _"local"_ process does not need to be on your local machine. In this example, we show how to run a flow in a Docker container on your local machine, but you could use a Docker container on any machine that has [Docker installed](https://docs.docker.com/engine/install/).
+
+## Overview
+In this example, you will set up:
+
+- a simple flow that retrieves the number of stars for some GitHub repositories
+- a `Dockerfile` that packages up your flow code and dependencies into a container image
+
+## Writing the flow
+
+Say we have a flow that retrieves the number of stars for a GitHub repository:
+
+```python serve_retrieve_github_stars.py {19-23}
+import httpx
+from prefect import flow, task
+
+
+@task(log_prints=True)
+def get_stars_for_repo(repo: str) -> int:
+ response = httpx.Client().get(f"https://api.github.com/repos/{repo}")
+ stargazer_count = response.json()["stargazers_count"]
+ print(f"{repo} has {stargazer_count} stars")
+ return stargazer_count
+
+
+@flow
+def retrieve_github_stars(repos: list[str]) -> list[int]:
+    return get_stars_for_repo.map(repos).result()
+
+
+if __name__ == "__main__":
+ retrieve_github_stars.serve(
+ parameters={
+ "repos": ["python/cpython", "prefectHQ/prefect"],
+ }
+ )
+```
+
+We can serve this flow on our local machine using:
+
+```bash
+python serve_retrieve_github_stars.py
+```
+
+... but how can we package this up so we can run it on other machines?
+
+## Writing the Dockerfile
+
+Assuming we have our Python requirements defined in a file:
+
+```txt requirements.txt
+prefect
+```
+
+and this directory structure:
+
+```
+├── Dockerfile
+├── requirements.txt
+└── serve_retrieve_github_stars.py
+```
+
+We can package up our flow into a Docker container using a `Dockerfile`.
+
+
+
+```dockerfile Using pip
+# Use an official Python runtime as the base image
+FROM python:3.12-slim
+
+# Set the working directory in the container
+WORKDIR /app
+
+# Copy the requirements file into the container
+COPY requirements.txt .
+
+# Install the required packages
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Copy the rest of the application code
+COPY serve_retrieve_github_stars.py .
+
+# Set the command to run your application
+CMD ["python", "serve_retrieve_github_stars.py"]
+```
+
+```dockerfile Using uv
+# Use the official Python image with uv pre-installed
+FROM ghcr.io/astral-sh/uv:python3.12-bookworm-slim
+
+# Set the working directory
+WORKDIR /app
+
+# Set environment variables
+ENV UV_SYSTEM_PYTHON=1
+ENV PATH="/root/.local/bin:$PATH"
+
+# Copy only the requirements file first to leverage Docker cache
+COPY requirements.txt .
+
+# Install dependencies
+RUN --mount=type=cache,target=/root/.cache/uv \
+ uv pip install -r requirements.txt
+
+# Copy the rest of the application code
+COPY serve_retrieve_github_stars.py .
+
+# Set the entrypoint
+ENTRYPOINT ["python", "serve_retrieve_github_stars.py"]
+```
+
+
+Using `pip`, the image is built in about 20 seconds, and using `uv`, the image is built in about 3 seconds.
+
+You can learn more about using `uv` in the [Astral documentation](https://docs.astral.sh/uv/guides/integration/docker/).
+
+
+## Build and run the container
+
+Now that we have a flow and a Dockerfile, we can build the image from the Dockerfile and run a container from this image.
+
+### Build (and push) the image
+
+We can build the image with the `docker build` command and the `-t` flag to specify a name for the image.
+
+```bash
+docker build -t my-flow-image .
+```
+
+At this point, you may also want to push the image to a container registry such as [Docker Hub](https://hub.docker.com/) or [GitHub Container Registry](https://docs.github.com/en/packages/working-with-a-github-packages-registry/working-with-the-container-registry). Please refer to each registry's respective documentation for details on authentication and registry naming conventions.
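+
+For example, pushing to Docker Hub might look like this (a sketch; `your-username` is a placeholder):
+
+```bash
+docker tag my-flow-image your-username/my-flow-image:latest
+docker push your-username/my-flow-image:latest
+```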
+
+### Run the container
+
+You'll likely want to inject some environment variables into your container, so let's define a `.env` file:
+
+```bash .env
+PREFECT_API_URL=
+PREFECT_API_KEY=
+```
+
+Then, run the container in [detached mode](https://docs.docker.com/engine/reference/commandline/run/#detached-d) (in other words, in the background):
+
+```bash
+docker run -d --env-file .env my-flow-image
+```
+
+#### Verify the container is running
+
+```bash
+docker ps | grep my-flow-image
+```
+
+You should see your container in the list of running containers. Note the `CONTAINER ID`, as we'll need it to view the logs.
+
+#### View logs
+```bash
+docker logs <CONTAINER-ID>
+```
+
+You should see logs from your newly served process, with the link to your deployment in the UI.
+
+### Stop the container
+
+```bash
+docker stop <CONTAINER-ID>
+```
+
+## Next steps
+
+Congratulations! You have packaged and served a flow on a long-lived Docker container.
+
+You may now easily deploy this container to other infrastructures, such as:
+- [Modal](https://modal.com/)
+- [Google Cloud Run](https://cloud.google.com/run)
+- [AWS Fargate / ECS](https://aws.amazon.com/fargate/)
+- Managed Kubernetes (for example, GKE, EKS, or AKS)
+
+or anywhere else you can run a Docker container!
\ No newline at end of file
diff --git a/docs/3.0/develop/artifacts.mdx b/docs/v3/develop/artifacts.mdx
similarity index 95%
rename from docs/3.0/develop/artifacts.mdx
rename to docs/v3/develop/artifacts.mdx
index 2fb2b5590ce5..695d6b457236 100644
--- a/docs/3.0/develop/artifacts.mdx
+++ b/docs/v3/develop/artifacts.mdx
@@ -11,7 +11,7 @@ Prefect artifacts:
- are stored in Prefect Cloud or Prefect server and rendered in the Prefect UI
- make it easy to visualize outputs or side effects that your runs produce, and capture updates over time
-![Markdown artifact sales report screenshot](/3.0/img/ui/md-artifact-info.png)
+![Markdown artifact sales report screenshot](/v3/img/ui/md-artifact-info.png)
Common use cases for artifacts include:
@@ -93,7 +93,7 @@ You can specify multiple artifacts with the same key to easily track something v
After running flows that create artifacts, view the artifacts in the **Artifacts** page of the UI.
Click into the "irregular-data" artifact to see its versions, along with custom descriptions and links to the relevant data.
-![Link artifact details with multiple versions](/3.0/img/ui/link-artifact-info.png)
+![Link artifact details with multiple versions](/v3/img/ui/link-artifact-info.png)
You can also view information about the artifact such as:
@@ -133,7 +133,7 @@ Progress artifacts render dynamically on the flow run graph in the Prefect UI, i
To create a progress artifact, use the `create_progress_artifact()` function. To update a progress artifact, use the `update_progress_artifact()` function.
-![Progress artifact example](/3.0/img/ui/progress-artifact-example.png)
+![Progress artifact example](/v3/img/ui/progress-artifact-example.png)
```python
from time import sleep
@@ -239,7 +239,7 @@ if __name__ == "__main__":
After running the above flow, you should see your "gtm-report" artifact in the Artifacts page of the UI.
-![Markdown sales report screenshot](/3.0/img/ui/md-artifact-info.png)
+![Markdown sales report screenshot](/v3/img/ui/md-artifact-info.png)
You can view the associated flow run id or task run id, previous versions of the artifact, the rendered Markdown data, and the optional Markdown description.
@@ -274,13 +274,13 @@ if __name__ == "__main__":
my_fn()
```
-![Table artifact with customer info](/3.0/img/ui/table-artifact-info.png)
+![Table artifact with customer info](/v3/img/ui/table-artifact-info.png)
### Create image artifacts
Image artifacts render publicly available images in the Prefect UI. To create an image artifact, use the `create_image_artifact()` function.
-![Image artifact example](/3.0/img/ui/image-artifact-example.png)
+![Image artifact example](/v3/img/ui/image-artifact-example.png)
```python
from prefect import flow, task
@@ -354,7 +354,7 @@ prefect artifact delete --id
## Artifacts API
-Create, read, or delete artifacts programmatically through the [Prefect REST API](/3.0/api-ref/rest-api/).
+Create, read, or delete artifacts programmatically through the [Prefect REST API](/v3/api-ref/rest-api/).
With the Artifacts API, you can automate the creation and management of artifacts as part of your workflow.
For example, to read the five most recently created Markdown, table, and link artifacts, you can run the following:
@@ -386,4 +386,4 @@ for artifact in response.json():
If you don't specify a key, or require that a key must exist, the query will also return key-less artifacts.
-See the [Prefect REST API documentation](/3.0/api-ref/rest-api/) on artifacts for more information.
+See the [Prefect REST API documentation](/v3/api-ref/rest-api/) on artifacts for more information.
diff --git a/docs/3.0/develop/blocks.mdx b/docs/v3/develop/blocks.mdx
similarity index 98%
rename from docs/3.0/develop/blocks.mdx
rename to docs/v3/develop/blocks.mdx
index 45cb33dc4bbe..eb8eada8c7c4 100644
--- a/docs/3.0/develop/blocks.mdx
+++ b/docs/v3/develop/blocks.mdx
@@ -12,7 +12,7 @@ Prefect supports [a large number of common blocks](#pre-registered-blocks) and i
Blocks are useful for sharing configuration across flow runs and between flows.
-For configuration that will change between flow runs, we recommend using [parameters](/3.0/develop/write-flows/#parameters).
+For configuration that will change between flow runs, we recommend using [parameters](/v3/develop/write-flows/#parameters).
## How blocks work
@@ -28,9 +28,9 @@ A block _type_ is essentially a schema registered with the Prefect API. This sch
To see block types available for configuration, use `prefect block type ls`
from the CLI or navigate to the **Blocks** page in the UI and click **+**.
-![The block catalogue in the UI](/3.0/img/ui/block-library.png)
+![The block catalogue in the UI](/v3/img/ui/block-library.png)
-These types separate blocks from [Prefect variables](/3.0/develop/variables/), which are unstructured JSON documents.
+These types separate blocks from [Prefect variables](/v3/develop/variables/), which are unstructured JSON documents.
In addition, block schemas allow for fields of `SecretStr` type which are stored with additional encryption and not displayed by default in the UI.
Block types are identified by a _slug_ that is not configurable.
@@ -410,7 +410,7 @@ You can create and use these block types through the UI without installing any a
The `S3`, `Azure`, `GCS`, and `GitHub` blocks are deprecated in favor of the corresponding `S3Bucket`,
`AzureBlobStorageCredentials`, `GCSBucket`, and `GitHubRepository` blocks found in the
[Prefect integration libraries](/integrations/).
-The JSON, DateTime, and String blocks are deprecated in favor of [Variables](/3.0/develop/variables/).
+The JSON, DateTime, and String blocks are deprecated in favor of [Variables](/v3/develop/variables/).
### Blocks in Prefect integration libraries
diff --git a/docs/3.0/develop/deferred-tasks.mdx b/docs/v3/develop/deferred-tasks.mdx
similarity index 100%
rename from docs/3.0/develop/deferred-tasks.mdx
rename to docs/v3/develop/deferred-tasks.mdx
diff --git a/docs/3.0/develop/global-concurrency-limits.mdx b/docs/v3/develop/global-concurrency-limits.mdx
similarity index 97%
rename from docs/3.0/develop/global-concurrency-limits.mdx
rename to docs/v3/develop/global-concurrency-limits.mdx
index 9204189b516d..964b37e7f9d6 100644
--- a/docs/3.0/develop/global-concurrency-limits.mdx
+++ b/docs/v3/develop/global-concurrency-limits.mdx
@@ -369,7 +369,7 @@ In addition to global concurrency limits, Prefect provides several other ways to
Unlike global concurrency limits, which are a more general way to control concurrency for any Python-based operation, the following concurrency limit options are specific to Prefect objects:
-- [Work pool flow run concurrency limits](/3.0/deploy/infrastructure-concepts/work-pools#manage-concurrency)
-- [Work queue flow run concurrency limits](/3.0/deploy/infrastructure-concepts/work-pools#queue-concurrency-limits)
-- [Deployment flow run concurrency limits](/3.0/deploy/index#concurrency-limiting)
-- [Task run concurrency limits](/3.0/develop/task-run-limits)
+- [Work pool flow run concurrency limits](/v3/deploy/infrastructure-concepts/work-pools#manage-concurrency)
+- [Work queue flow run concurrency limits](/v3/deploy/infrastructure-concepts/work-pools#queue-concurrency-limits)
+- [Deployment flow run concurrency limits](/v3/deploy/index#concurrency-limiting)
+- [Task run concurrency limits](/v3/develop/task-run-limits)
diff --git a/docs/v3/develop/index.mdx b/docs/v3/develop/index.mdx
new file mode 100644
index 000000000000..eafe9b9d1f1a
--- /dev/null
+++ b/docs/v3/develop/index.mdx
@@ -0,0 +1,38 @@
+---
+title: Develop overview
+sidebarTitle: Overview
+description: Learn how to write code with Prefect objects for workflows you can trust.
+---
+
+These pages explain how to develop Prefect workflows.
+
+The **Write and run workflows** section introduces and explains Prefect's core concepts.
+
+- [Write and run flows](/v3/develop/write-flows/) introduces the most central Prefect object, the `flow`.
+- [Write and run tasks](/v3/develop/write-tasks/) shows you how to create `task` decorated functions that represent a discrete unit of work in a Prefect workflow.
+- [Run tasks in the background](/v3/develop/task-runners/) explains how to use tasks to execute small, discrete units of work quickly.
+- [Log run activity](/v3/develop/logging/) shows you how to capture fine-grained information about flows and tasks for monitoring, troubleshooting, and auditing.
+
+The **Configure runtime behavior** section introduces advanced features for interacting with running workflows and automatically taking actions based on workflow state.
+
+- [Configure task caching](/v3/develop/task-caching/) explores how to cache task results to save money and time.
+- [Write transactions](/v3/develop/transactions/) shows how to group tasks together to ensure they run atomically.
+- [Pause and resume flow runs](/v3/develop/pause-resume/) explains how to halt flow runs for input.
+- [Send and receive flow run inputs](/v3/develop/inputs/) highlights how to run Human-in-the-Loop workflows with Prefect.
+- [Access runtime context](/v3/develop/runtime-context/) shows how to access information about the current run.
+
+The **Manage state and configuration** section explores how to manage data and configuration in Prefect.
+
+- [Manage results](/v3/develop/results/) explains how to store and retrieve task results.
+- [Manage states](/v3/develop/manage-states/) discusses how to take action based on task and flow run states.
+- [Create run artifacts](/v3/develop/artifacts/) demonstrates how to create artifacts for human consumption in the UI.
+- [Set and get variables](/v3/develop/variables/) shows how to store and retrieve configuration values from the API.
+- [Connect to external systems](/v3/develop/blocks/) discusses how Prefect blocks can help you connect to external systems.
+
+The **Manage concurrency** section explains how to speed up your workflows and limit activities that can take place concurrently.
+
+- [Run tasks concurrently or in parallel](/v3/develop/task-runners/) explains how to run tasks concurrently or in parallel with Dask or Ray.
+- [Limit concurrent task runs](/v3/develop/task-run-limits/) shows how to prevent too many tasks from running simultaneously.
+- [Apply concurrency and rate limits](/v3/develop/global-concurrency-limits/) demonstrates how to control concurrency and apply rate limits using Prefect's provided utilities.
+
+Finally, [Test workflows](/v3/develop/test-workflows/) discusses tools for testing workflows.
diff --git a/docs/3.0/develop/inputs.mdx b/docs/v3/develop/inputs.mdx
similarity index 99%
rename from docs/3.0/develop/inputs.mdx
rename to docs/v3/develop/inputs.mdx
index f840db1c9777..748fa7355340 100644
--- a/docs/3.0/develop/inputs.mdx
+++ b/docs/v3/develop/inputs.mdx
@@ -321,7 +321,7 @@ def get_shirt_order():
This code causes the flow run to continually pause until the user enters a valid age.
-As an additional step, you can use an [automation](/3.0/automate/events/automations-triggers) to alert the user to the error.
+As an additional step, you can use an [automation](/v3/automate/events/automations-triggers) to alert the user to the error.
## Send and receive input at runtime
@@ -491,7 +491,7 @@ async def sender():
An iterator helps keep track of the inputs your flow has already received. If you want your flow to suspend and then resume later,
save the keys of the inputs you've seen so the flow can read them back out when it resumes.
-Consider using a [block](/3.0/develop/blocks/), such as a `JSONBlock`.
+Consider using a [block](/v3/develop/blocks/), such as a `JSONBlock`.
The following flow receives input for 30 seconds then suspends itself, which exits the flow and tears down infrastructure:
diff --git a/docs/3.0/develop/logging.mdx b/docs/v3/develop/logging.mdx
similarity index 99%
rename from docs/3.0/develop/logging.mdx
rename to docs/v3/develop/logging.mdx
index c32abe119df0..01d2b68210ec 100644
--- a/docs/3.0/develop/logging.mdx
+++ b/docs/v3/develop/logging.mdx
@@ -335,7 +335,7 @@ and SciPy logging statements with your flow and task run logs, use:
`PREFECT_LOGGING_EXTRA_LOGGERS=dask,scipy`
Configure this setting as an environment variable or in a profile. See
-[Settings](/3.0/manage/settings-and-profiles/) for more details about how to use settings.
+[Settings](/v3/develop/settings-and-profiles/) for more details about how to use settings.
## Access logs from the command line
diff --git a/docs/3.0/develop/manage-states.mdx b/docs/v3/develop/manage-states.mdx
similarity index 94%
rename from docs/3.0/develop/manage-states.mdx
rename to docs/v3/develop/manage-states.mdx
index 38ccb4302dda..398ca4888492 100644
--- a/docs/3.0/develop/manage-states.mdx
+++ b/docs/v3/develop/manage-states.mdx
@@ -3,8 +3,8 @@ title: Manage states
description: Prefect states contain information about the status of a flow or task run.
---
-States are rich objects that contain information about the status of a particular [task](/3.0/develop/write-tasks)
-run or [flow](/3.0/develop/write-flows/) run.
+States are rich objects that contain information about the status of a particular [task](/v3/develop/write-tasks)
+run or [flow](/v3/develop/write-flows/) run.
You can learn many things about a task or flow by examining its current state or the
history of its states. For example, a state could tell you that a task:
@@ -123,10 +123,13 @@ import FinalFlowState from '/snippets/final-flow-state.mdx'
State change hooks execute code in response to **_client side_** changes in flow or task run states, enabling you to define actions for
specific state transitions in a workflow.
-A state hook must have the following signature:
+State hooks have the following signature:
```python
-def my_hook(obj: Task | Flow, run: TaskRun | FlowRun, state: State) -> None:
+def my_task_state_hook(task: Task, run: TaskRun, state: State) -> None:
+ ...
+
+def my_flow_state_hook(flow: Flow, run: FlowRun, state: State) -> None:
...
```
@@ -137,7 +140,7 @@ from prefect import task, flow
# for type hints only
from prefect import Task
-from prefect.context import TaskRun
+from prefect.client.schemas.objects import TaskRun
from prefect.states import State
@@ -154,11 +157,17 @@ def nice_task(name: str):
# alternatively hooks can be specified via decorator
-@my_nice_task.on_completion
+@nice_task.on_completion
def second_hook(tsk: Task, run: TaskRun, state: State) -> None:
print('another hook')
+
+nice_task(name='Marvin')
```
+
+To import a `TaskRun` or `FlowRun` for type hinting, you can import from `prefect.client.schemas.objects`.
+
+
State change hooks are versatile, allowing you to specify multiple state change hooks for the same state transition,
or to use the same state change hook for different transitions:
diff --git a/docs/3.0/develop/pause-resume.mdx b/docs/v3/develop/pause-resume.mdx
similarity index 97%
rename from docs/3.0/develop/pause-resume.mdx
rename to docs/v3/develop/pause-resume.mdx
index 868c881b12b4..66cab5115e74 100644
--- a/docs/3.0/develop/pause-resume.mdx
+++ b/docs/v3/develop/pause-resume.mdx
@@ -102,7 +102,7 @@ When you suspend a flow run, the flow exits completely and the infrastructure ru
This means you can suspend flow runs to save costs instead of paying for long-running infrastructure.
However, when the flow run resumes, the flow code will execute again from the beginning of the flow.
-We recommend using [tasks](/3.0/develop/write-tasks/) and [task caching](/3.0/develop/task-caching) to avoid recomputing expensive operations.
+We recommend using [tasks](/v3/develop/write-tasks/) and [task caching](/v3/develop/task-caching) to avoid recomputing expensive operations.
Prefect exposes this capability through the [`suspend_flow_run`](https://prefect-python-sdk-docs.netlify.app/prefect/engine/#prefect.engine.suspend_flow_run) and
[`resume_flow_run`](https://prefect-python-sdk-docs.netlify.app/prefect/engine/#prefect.engine.resume_flow_run) functions, as well as the Prefect UI.
@@ -211,4 +211,4 @@ After successful validation, the flow run resumes, and the return value of the `
is an instance of the `UserNameInput` model containing the provided data.
For more information on receiving input from users when pausing and suspending flow runs,
-see [Send and receive flow run inputs](/3.0/develop/inputs/).
\ No newline at end of file
+see [Send and receive flow run inputs](/v3/develop/inputs/).
\ No newline at end of file
diff --git a/docs/3.0/develop/results.mdx b/docs/v3/develop/results.mdx
similarity index 75%
rename from docs/3.0/develop/results.mdx
rename to docs/v3/develop/results.mdx
index 93999bf09794..37f7ec2148cf 100644
--- a/docs/3.0/develop/results.mdx
+++ b/docs/v3/develop/results.mdx
@@ -3,8 +3,8 @@ title: Manage results
description: Results represent the data returned by a flow or a task and enable features such as caching.
---
-Results are the bedrock of many Prefect features - most notably [transactions](/3.0/develop/transactions)
-and [caching](/3.0/develop/task-caching) - and are foundational to the resilient execution paradigm that Prefect enables.
+Results are the bedrock of many Prefect features - most notably [transactions](/v3/develop/transactions)
+and [caching](/v3/develop/task-caching) - and are foundational to the resilient execution paradigm that Prefect enables.
Any return value from a task or a flow is a result.
By default these results are not persisted and no reference to them is maintained in the API.
@@ -19,25 +19,25 @@ The simplest way to turn on result persistence globally is through the `PREFECT_
prefect config set PREFECT_RESULTS_PERSIST_BY_DEFAULT=true
```
-See [settings](/3.0/manage/settings-and-profiles) for more information on how settings are managed.
+See [settings](/v3/develop/settings-and-profiles) for more information on how settings are managed.
## Configuring result persistence
There are four categories of configuration for result persistence:
-- [whether to persist results at all](/3.0/develop/results#enabling-result-persistence): this is configured through
-various keyword arguments and the `PREFECT_RESULTS_PERSIST_BY_DEFAULT` setting.
-- [what filesystem to persist results to](/3.0/develop/results#result-storage): this is configured through the `result_storage`
+- [whether to persist results at all](#enabling-result-persistence): this is configured through
+various keyword arguments, the `PREFECT_RESULTS_PERSIST_BY_DEFAULT` setting, and the `PREFECT_TASKS_DEFAULT_PERSIST_RESULT` setting for tasks specifically.
+- [what filesystem to persist results to](#result-storage): this is configured through the `result_storage`
keyword and the `PREFECT_DEFAULT_RESULT_STORAGE_BLOCK` setting.
-- [how to serialize and deserialize results](/3.0/develop/results#result-serialization): this is configured through
+- [how to serialize and deserialize results](#result-serialization): this is configured through
the `result_serializer` keyword and the `PREFECT_RESULTS_DEFAULT_SERIALIZER` setting.
-- [what filename to use](/3.0/develop/results#result-filenames): this is configured through one of
+- [what filename to use](#result-filenames): this is configured through one of
`result_storage_key`, `cache_policy`, or `cache_key_fn`.
### Default persistence configuration
Once result persistence is enabled - whether through the `PREFECT_RESULTS_PERSIST_BY_DEFAULT` setting or
-through any of the mechanisms [described below](/3.0/develop/results#enabling-result-persistence) - Prefect's default
+through any of the mechanisms [described below](#enabling-result-persistence) - Prefect's default
result storage configuration is activated.
If you enable result persistence and don't specify a filesystem block, your results will be stored locally.
@@ -51,8 +51,8 @@ prefect config set PREFECT_LOCAL_STORAGE_PATH='~/.my-results/'
### Enabling result persistence
-In addition to the `PREFECT_RESULTS_PERSIST_BY_DEFAULT` setting, result persistence can also be
-enabled or disabled on both individual flows and individual tasks.
+In addition to the `PREFECT_RESULTS_PERSIST_BY_DEFAULT` and `PREFECT_TASKS_DEFAULT_PERSIST_RESULT` settings,
+result persistence can also be enabled or disabled on both individual flows and individual tasks.
Specifying a non-null value for any of the following keywords on the task decorator will enable result
persistence for that task:
- `persist_result`: a boolean that allows you to explicitly enable or disable result persistence.
@@ -60,8 +60,8 @@ persistence for that task:
specifies where results should be stored.
- `result_storage_key`: a string that specifies the filename of the result within the task's result storage.
- `result_serializer`: a string or serializer that configures how the data should be serialized and deserialized.
-- `cache_policy`: a [cache policy](/3.0/develop/task-caching#cache-policies) specifying the behavior of the task's cache.
-- `cache_key_fn`: [a function](/3.0/develop/task-caching#cache-key-functions) that configures a custom cache policy.
+- `cache_policy`: a [cache policy](/v3/develop/task-caching#cache-policies) specifying the behavior of the task's cache.
+- `cache_key_fn`: [a function](/v3/develop/task-caching#cache-key-functions) that configures a custom cache policy.
Similarly, setting `persist_result=True`, `result_storage`, or `result_serializer` on a flow will enable
persistence for that flow.
@@ -74,16 +74,18 @@ tasks called within that flow by default.
Any settings _explicitly_ set on a task take precedence over the flow settings.
+Additionally, the `PREFECT_TASKS_DEFAULT_PERSIST_RESULT` environment variable can be used to globally control the default persistence behavior for tasks, overriding the default behavior set by a parent flow or task.
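+
+For example, a minimal sketch of disabling task result persistence globally from the CLI:
+
+```bash
+prefect config set PREFECT_TASKS_DEFAULT_PERSIST_RESULT=false
+```
+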
### Result storage
You can configure the system of record for your results through the `result_storage` keyword argument.
-This keyword accepts an instantiated [filesystem block](/3.0/develop/blocks/), or a block slug. Find your blocks' slugs with `prefect block ls`.
+This keyword accepts an instantiated [filesystem block](/v3/develop/blocks/), or a block slug. Find your blocks' slugs with `prefect block ls`.
Note that if you want your tasks to share a common cache, your result storage should be accessible by
the infrastructure in which those tasks run. [Integrations](/integrations/integrations) have cloud-specific storage blocks.
For example, a common distributed filesystem for result storage is AWS S3.
+Additionally, you can control the default persistence behavior for task results with the `default_persist_result` setting: set it to `True` to persist task results by default, or `False` to disable persistence by default. Individual tasks and flows can still override this default.
{/* pmd-metadata: fixture:cleanup_s3_bucket_block */}
```python
@@ -142,7 +144,7 @@ You can configure the filename of the result file within result storage using ei
- `result_storage_key`: a templated string that can use any of the fields within `prefect.runtime` and
the task's individual parameter values. These templated values will be populated at runtime.
- `cache_key_fn`: a function that accepts the task run context and its runtime parameters and returns
-a string. See [task caching documentation](/3.0/develop/task-caching#cache-key-functions) for more information.
+a string. See [task caching documentation](/v3/develop/task-caching#cache-key-functions) for more information.
If both `result_storage_key` and `cache_key_fn` are provided, only the `result_storage_key` will be used.
@@ -167,7 +169,7 @@ def my_flow():
```
If a result exists at a given storage key in the storage location, the task will load it without running.
-To learn more about caching mechanics in Prefect, see the [caching documentation](/3.0/develop/task-caching).
+To learn more about caching mechanics in Prefect, see the [caching documentation](/v3/develop/task-caching).
### Result serialization
diff --git a/docs/3.0/develop/runtime-context.mdx b/docs/v3/develop/runtime-context.mdx
similarity index 100%
rename from docs/3.0/develop/runtime-context.mdx
rename to docs/v3/develop/runtime-context.mdx
diff --git a/docs/v3/develop/settings-and-profiles.mdx b/docs/v3/develop/settings-and-profiles.mdx
new file mode 100644
index 000000000000..032791b69b53
--- /dev/null
+++ b/docs/v3/develop/settings-and-profiles.mdx
@@ -0,0 +1,290 @@
+---
+title: Configure settings and profiles
+description: Prefect settings let you customize behavior across different environments.
+---
+
+## Why use settings?
+
+Settings in Prefect help you control how your workflows behave. They let you easily customize Prefect to work the way you need it to, whether you're testing locally or running in production.
+
+Specifically, settings enable:
+
+- **Environment-Specific Configuration**: Use different settings for development (like detailed logging), testing (like test databases), and production (like your production server) without changing your workflow code.
+
+- **Runtime Flexibility**: Quickly adjust things like retry attempts or logging levels without having to modify and redeploy your workflows.
+
+## Get started with settings
+
+The simplest way to declare settings is by creating a `prefect.toml` file in your project directory. For example:
+
+```toml prefect.toml
+# Set more detailed logging while developing
+[logging]
+level = "DEBUG"
+```
+
+
+To use `prefect.toml` or `pyproject.toml` for configuration, `prefect>=3.1` must be installed.
+
+To use a `.env` file for configuration, `prefect>=3.0.5` must be installed.
+
+
+Most editors have plugins for TOML that provide syntax highlighting, linting, and autocomplete for `prefect.toml` files. If you use VSCode, we recommend the [Even Better TOML extension](https://marketplace.visualstudio.com/items?itemName=tamasfe.even-better-toml).
+
+
+**Writing TOML**
+
+TOML is a simple configuration language. If you're new to TOML, learn more about the syntax in the [official documentation](https://toml.io/en/).
+
+In particular, note that TOML uses square brackets to denote [tables](https://toml.io/en/v1.0.0#table), which are analogous to dictionaries in Python.
+
+
+## Settings sources
+
+You can configure settings via the following sources (highest to lowest precedence):
+
+- **Environment variables**: Environment variables are useful for temporarily overriding settings or configuring the runtime environment of a single workflow run.
+
+- **`.env` file**: `.env` files are useful for declaring local settings that you want to apply across multiple runs.
+
+- **`prefect.toml` file**: A `prefect.toml` file is useful when you want to declare settings for an entire project. You can keep this file in your project directory and it will be automatically applied regardless of where you run your project.
+
+- **`pyproject.toml` file**: If you already have a `pyproject.toml` file in your project or like to consolidate settings for all your tools in one place, you can declare settings in the `[tool.prefect]` table of your `pyproject.toml` file.
+
+- **Profiles**: Prefect profiles are useful for switching between different environments. For example, you might use one profile for a local Prefect server and another for your production environment.
+
+When multiple settings sources define the same setting, Prefect resolves it using the precedence order listed above (highest to lowest).
+
+For example, if you set `PREFECT_API_URL` in both your environment and your active profile, the environment variable value will take precedence.
+
+### Environment variables
+
+Environment variables are useful for temporarily overriding settings or configuring the runtime environment of a workflow.
+
+All Prefect settings can be set using environment variables prefixed with `PREFECT_`. They take precedence over all other sources, making them ideal for adjustments that should only apply to a single session or process.
+
+For example, you can run the following command to temporarily set the logging level for a single flow run:
+
+```bash
+PREFECT_LOGGING_LEVEL="DEBUG" python my_flow.py
+```
+
+You can also export an environment variable in your shell to apply it to all flow runs in that shell session:
+
+```bash
+export PREFECT_LOGGING_LEVEL="DEBUG"
+python my_flow.py
+```
+
+You can see supported environment variables for each setting in the [settings reference documentation](/v3/develop/settings-ref).
+
+### `.env` file
+
+`.env` files are useful for declaring local settings that you want to apply across multiple runs.
+
+When running `prefect` in a directory that contains a `.env` file, Prefect will automatically apply the settings in the file. We recommend keeping your `.env` files local and not committing them to your code repositories.
+
+For example, the following `.env` file declares a local setting for the logging level:
+
+```bash .env
+PREFECT_LOGGING_LEVEL="DEBUG"
+```
+
+Any flows run in the same directory as this `.env` file will use the `DEBUG` logging level, even if they are run in different shell sessions.
+
+View supported environment variables for each setting in the [settings reference documentation](/v3/develop/settings-ref).
+
+### `prefect.toml` file
+
+A `prefect.toml` file is useful when you want to declare settings for an entire project.
+
+You can keep a `prefect.toml` file in your project directory and the declared settings will be automatically applied when running `prefect` in that directory. We recommend committing this file to your code repositories to ensure consistency across environments.
+
+For example, the following `prefect.toml` file declares a setting for the logging level:
+
+```toml prefect.toml
+[logging]
+level = "DEBUG"
+```
+
+If you commit your `prefect.toml` file to a code repository, creating deployments from flows in that repository will use the settings declared in the `prefect.toml` file.
+
+You can see the `prefect.toml` path for each setting in the [settings reference documentation](/v3/develop/settings-ref).
+
+### `pyproject.toml` file
+
+Declaring settings in a `pyproject.toml` file is very similar to declaring settings in a `prefect.toml` file. The main difference is that settings are declared in the `[tool.prefect]` table instead of at the root of the file.
+
+For example, the following `pyproject.toml` file declares a setting for the logging level:
+
+```toml pyproject.toml
+[tool.prefect]
+logging.level = "DEBUG"
+```
+
+The advantage of declaring settings in a `pyproject.toml` file is that it allows you to keep all your dependencies and settings for all your tools in one place. You can learn more about `pyproject.toml` files in the [Python Packaging User Guide](https://packaging.python.org/en/latest/specifications/pyproject-toml/#arbitrary-tool-configuration-the-tool-table).
+
+### Profiles
+
+Prefect profiles are useful for switching between different environments. By creating different profiles with different API URLs, you can easily switch between a local Prefect server and your production environment.
+
+Profiles are stored in a [TOML](https://toml.io/en/) file located at `~/.prefect/profiles.toml` by default. This location can be configured by setting `PREFECT_PROFILES_PATH`.
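+
+For example (a sketch; the path is a placeholder):
+
+```bash
+export PREFECT_PROFILES_PATH="$HOME/team-config/profiles.toml"
+```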
+
+Exactly one profile can be active at any time.
+
+Immediately after installation, the `ephemeral` profile is used, which has only one setting configured:
+
+```bash
+» docker run -it prefecthq/prefect:3-latest
+ ___ ___ ___ ___ ___ ___ _____
+ | _ \ _ \ __| __| __/ __|_ _|
+ | _/ / _|| _|| _| (__ | |
+ |_| |_|_\___|_| |___\___| |_|
+
+
+root@e56e34ab8934:/opt/prefect $ prefect config view
+PREFECT_PROFILE='ephemeral'
+PREFECT_SERVER_ALLOW_EPHEMERAL_MODE='True' (from profile)
+```
+
+
+**What is `PREFECT_SERVER_ALLOW_EPHEMERAL_MODE`?**
+
+This setting allows a Prefect server to be run ephemerally as needed without explicitly starting a server process.
+
+
+The `prefect profile` CLI commands enable you to create, review, and manage profiles:
+
+| Command | Description |
+| --- | --- |
+| `create` | Create a new profile; use the `--from` flag to copy settings from another profile. |
+| `delete` | Delete the given profile. |
+| `inspect` | Display settings from a given profile; defaults to active. |
+| `ls` | List all profile names. |
+| `rename` | Change the name of a profile. |
+| `use` | Switch the active profile. |
+| `populate-defaults` | Populate your `profiles.toml` file with opinionated stock profiles. |
+
+... or you may edit your `profiles.toml` file directly:
+
+```bash
+vim ~/.prefect/profiles.toml
+```
+
+#### Configure settings for the active profile
+
+The `prefect config` CLI commands enable you to manage the settings within the currently active profile.
+
+| Command | Description |
+| --- | --- |
+| `set` | Change the value for a setting. |
+| `unset` | Restore the default value for a setting. |
+| `view` | Display the current settings. |
+
+For example, the following CLI commands set configuration in the `ephemeral` profile and then create a new
+profile with new settings:
+
+```bash
+prefect profile use ephemeral
+prefect config set PREFECT_API_URL=http://127.0.0.1:4200/api
+
+prefect profile create new-profile --from ephemeral
+prefect profile use new-profile
+prefect config set PREFECT_RESULTS_PERSIST_BY_DEFAULT=true PREFECT_LOGGING_LEVEL="ERROR"
+
+prefect profile inspect
+prefect config unset PREFECT_LOGGING_LEVEL -y
+```
+
+
+**Environment variables always take precedence**
+
+Environment variables always take precedence over values declared in other sources.
+This allows you to configure certain runtime behavior for your workflows by setting the appropriate
+environment variable on the job or process executing the workflow.
+
+
+## View current configuration
+
+To view all available settings and their active values from the command line, run:
+
+```bash
+prefect config view --show-defaults
+```
+
+These settings are type-validated and you may verify your setup at any time with:
+
+```bash
+prefect config validate
+```
+
+## Common client settings
+
+- [`api.url`](/v3/develop/settings-ref/#url): this setting specifies the API endpoint of your
+Prefect Cloud workspace or a self-hosted Prefect server instance.
+- [`api.key`](/v3/develop/settings-ref/#key): this setting specifies the
+[API key](/v3/manage/cloud/manage-users/api-keys/) used to authenticate with Prefect Cloud.
+- [`home`](/v3/develop/settings-ref/#home): the `home` value specifies the local Prefect directory for configuration files,
+profiles, and the location of the default Prefect SQLite database.
+
+
+**Use `prefect cloud login` to set these values for Prefect Cloud**
+
+To set `PREFECT_API_URL` and `PREFECT_API_KEY` for your active profile, run `prefect cloud login`.
+Read more about [managing API keys](/v3/manage/cloud/manage-users/api-keys/).
+
+
+## Common server settings
+
+
+- [`server.database.connection_url`](/v3/develop/settings-ref/#connection-url): the database connection URL for a self-hosted Prefect server instance.
+Must be provided in a SQLAlchemy-compatible format. Prefect currently supports SQLite and Postgres.
+
+### Security settings
+
+**Host the UI behind a reverse proxy**
+
+When using a reverse proxy (such as [Nginx](https://nginx.org) or [Traefik](https://traefik.io)) to proxy traffic to a
+hosted Prefect UI instance, you must also configure the self-hosted Prefect server instance to connect to the API.
+The [`ui.api_url`](/v3/develop/settings-ref/#api_url) setting should be set to the external proxy URL.
+
+For example, if your external URL is `https://prefect-server.example.com` then you can configure a `prefect.toml` file for your server like this:
+
+```toml prefect.toml
+[ui]
+api_url = "https://prefect-server.example.com/api"
+```
+
+If you do not set `ui.api_url`, then `api.url` will be used as a fallback.
+
+**CSRF protection settings**
+
+If using self-hosted Prefect server, you can configure CSRF protection settings.
+
+- [`server.api.csrf_protection_enabled`](/v3/develop/settings-ref/#csrf-protection-enabled): activates CSRF protection on the server,
+requiring valid CSRF tokens for applicable requests. Recommended for production to prevent CSRF attacks.
+Defaults to `False`.
+- [`server.api.csrf_token_expiration`](/v3/develop/settings-ref/#csrf-token-expiration): sets the expiration duration for server-issued CSRF tokens,
+influencing how often tokens need to be refreshed. The default is 1 hour.
+- [`client.csrf_support_enabled`](/v3/develop/settings-ref/#csrf-support-enabled): enables or disables CSRF token handling in the Prefect client.
+When enabled, the client manages CSRF tokens for state-changing API requests. Defaults to `True`.
+
+By default clients expect that CSRF protection is enabled on the server. If you are running a server without CSRF protection,
+you can disable CSRF support in the client.
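+
+For example, a client pointed at a server without CSRF protection could turn off token handling in a `prefect.toml` file; a sketch (the same value can also be set with `prefect config set PREFECT_CLIENT_CSRF_SUPPORT_ENABLED=false`):
+
+```toml prefect.toml
+[client]
+csrf_support_enabled = false
+```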
+
+**CORS settings**
+
+If using self-hosted Prefect server, you can configure CORS settings to control which origins are allowed to make cross-origin requests to your server.
+
+- [`server.api.cors_allowed_origins`](/v3/develop/settings-ref/#cors-allowed-origins): a list of origins that are allowed to make cross-origin requests.
+- [`server.api.cors_allowed_methods`](/v3/develop/settings-ref/#cors-allowed-methods): a list of HTTP methods that are allowed to be used during cross-origin requests.
+- [`server.api.cors_allowed_headers`](/v3/develop/settings-ref/#cors-allowed-headers): a list of headers that are allowed to be used during cross-origin requests.
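+
+As an illustrative sketch, a `prefect.toml` restricting cross-origin requests to a single hypothetical origin and a pair of methods might look like:
+
+```toml prefect.toml
+[server.api]
+cors_allowed_origins = "https://ui.example.com"
+cors_allowed_methods = "GET,POST"
+```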
diff --git a/docs/v3/develop/settings-ref.mdx b/docs/v3/develop/settings-ref.mdx
new file mode 100644
index 000000000000..40f29b21b662
--- /dev/null
+++ b/docs/v3/develop/settings-ref.mdx
@@ -0,0 +1,2366 @@
+---
+title: Settings reference
+description: Reference for all available settings for Prefect.
+---
+{/* This page is generated by `scripts/generate_settings_ref.py`. Update the generation script to update this page. */}
+To use `prefect.toml` or `pyproject.toml` for configuration, `prefect>=3.1` must be installed.
+## Root Settings
+### `home`
+The path to the Prefect home directory. Defaults to `~/.prefect`.
+
+**Type**: `string`
+
+**Default**: `~/.prefect`
+
+**TOML dotted key path**: `home`
+
+**Supported environment variables**:
+`PREFECT_HOME`
+
+### `profiles_path`
+The path to a profiles configuration file.
+
+**Type**: `string | None`
+
+**Default**: `None`
+
+**TOML dotted key path**: `profiles_path`
+
+**Supported environment variables**:
+`PREFECT_PROFILES_PATH`
+
+### `debug_mode`
+If True, enables debug mode which may provide additional logging and debugging features.
+
+**Type**: `boolean`
+
+**Default**: `False`
+
+**TOML dotted key path**: `debug_mode`
+
+**Supported environment variables**:
+`PREFECT_DEBUG_MODE`
+
+### `api`
+
+**Type**: [APISettings](#apisettings)
+
+**TOML dotted key path**: `api`
+
+### `cli`
+
+**Type**: [CLISettings](#clisettings)
+
+**TOML dotted key path**: `cli`
+
+### `client`
+
+**Type**: [ClientSettings](#clientsettings)
+
+**TOML dotted key path**: `client`
+
+### `cloud`
+
+**Type**: [CloudSettings](#cloudsettings)
+
+**TOML dotted key path**: `cloud`
+
+### `deployments`
+
+**Type**: [DeploymentsSettings](#deploymentssettings)
+
+**TOML dotted key path**: `deployments`
+
+### `experiments`
+Settings for controlling experimental features
+
+**Type**: [ExperimentsSettings](#experimentssettings)
+
+**TOML dotted key path**: `experiments`
+
+### `flows`
+
+**Type**: [FlowsSettings](#flowssettings)
+
+**TOML dotted key path**: `flows`
+
+### `internal`
+Settings for internal Prefect machinery
+
+**Type**: [InternalSettings](#internalsettings)
+
+**TOML dotted key path**: `internal`
+
+### `logging`
+
+**Type**: [LoggingSettings](#loggingsettings)
+
+**TOML dotted key path**: `logging`
+
+### `results`
+
+**Type**: [ResultsSettings](#resultssettings)
+
+**TOML dotted key path**: `results`
+
+### `runner`
+
+**Type**: [RunnerSettings](#runnersettings)
+
+**TOML dotted key path**: `runner`
+
+### `server`
+
+**Type**: [ServerSettings](#serversettings)
+
+**TOML dotted key path**: `server`
+
+### `tasks`
+Settings for controlling task behavior
+
+**Type**: [TasksSettings](#taskssettings)
+
+**TOML dotted key path**: `tasks`
+
+### `testing`
+Settings used during testing
+
+**Type**: [TestingSettings](#testingsettings)
+
+**TOML dotted key path**: `testing`
+
+### `worker`
+Settings for controlling worker behavior
+
+**Type**: [WorkerSettings](#workersettings)
+
+**TOML dotted key path**: `worker`
+
+### `ui_url`
+The URL of the Prefect UI. If not set, the client will attempt to infer it.
+
+**Type**: `string | None`
+
+**Default**: `None`
+
+**TOML dotted key path**: `ui_url`
+
+**Supported environment variables**:
+`PREFECT_UI_URL`
+
+### `silence_api_url_misconfiguration`
+
+ If `True`, disables the warning shown when `PREFECT_API_URL` appears misconfigured.
+ When `PREFECT_API_URL` is intentionally set to a custom URL, for example one routed
+ through a reverse proxy, set this to `True` to silence the warning.
+
+
+**Type**: `boolean`
+
+**Default**: `False`
+
+**TOML dotted key path**: `silence_api_url_misconfiguration`
+
+**Supported environment variables**:
+`PREFECT_SILENCE_API_URL_MISCONFIGURATION`
+
+---
+## APISettings
+Settings for interacting with the Prefect API
+### `url`
+The URL of the Prefect API. If not set, the client will attempt to infer it.
+
+**Type**: `string | None`
+
+**Default**: `None`
+
+**TOML dotted key path**: `api.url`
+
+**Supported environment variables**:
+`PREFECT_API_URL`
+
+### `key`
+The API key used for authentication with the Prefect API. Should be kept secret.
+
+**Type**: `string | None`
+
+**Default**: `None`
+
+**TOML dotted key path**: `api.key`
+
+**Supported environment variables**:
+`PREFECT_API_KEY`
+
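+For illustration, both `api.url` and `api.key` can be declared together under the `[api]` table in a `prefect.toml` file (the URL and key below are placeholders):
+
+```toml prefect.toml
+[api]
+url = "https://api.prefect.cloud/api/accounts/<account-id>/workspaces/<workspace-id>"
+key = "<your-api-key>"
+```
+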
+### `tls_insecure_skip_verify`
+If `True`, disables SSL checking to allow insecure requests. Setting to `True` is recommended only during development, for example when using self-signed certificates.
+
+**Type**: `boolean`
+
+**Default**: `False`
+
+**TOML dotted key path**: `api.tls_insecure_skip_verify`
+
+**Supported environment variables**:
+`PREFECT_API_TLS_INSECURE_SKIP_VERIFY`
+
+### `ssl_cert_file`
+This setting specifies the path to an SSL certificate file.
+
+**Type**: `string | None`
+
+**Default**: `None`
+
+**TOML dotted key path**: `api.ssl_cert_file`
+
+**Supported environment variables**:
+`PREFECT_API_SSL_CERT_FILE`
+
+### `enable_http2`
+If `True`, enables HTTP/2 support for communicating with an API. If the API does not support HTTP/2, this will have no effect and connections will be made via HTTP/1.1.
+
+**Type**: `boolean`
+
+**Default**: `False`
+
+**TOML dotted key path**: `api.enable_http2`
+
+**Supported environment variables**:
+`PREFECT_API_ENABLE_HTTP2`
+
+### `request_timeout`
+The default timeout for requests to the API
+
+**Type**: `number`
+
+**Default**: `60.0`
+
+**TOML dotted key path**: `api.request_timeout`
+
+**Supported environment variables**:
+`PREFECT_API_REQUEST_TIMEOUT`
+
+---
+## CLISettings
+Settings for controlling CLI behavior
+### `colors`
+If `True`, use colors in CLI output. If `False`, output will not include color codes.
+
+**Type**: `boolean`
+
+**Default**: `True`
+
+**TOML dotted key path**: `cli.colors`
+
+**Supported environment variables**:
+`PREFECT_CLI_COLORS`
+
+### `prompt`
+If `True`, use interactive prompts in CLI commands. If `False`, no interactive prompts will be used. If `None`, the value will be dynamically determined based on the presence of an interactive-enabled terminal.
+
+**Type**: `boolean | None`
+
+**Default**: `None`
+
+**TOML dotted key path**: `cli.prompt`
+
+**Supported environment variables**:
+`PREFECT_CLI_PROMPT`
+
+### `wrap_lines`
+If `True`, wrap long lines in CLI output by inserting new lines. If `False`, output will not be wrapped.
+
+**Type**: `boolean`
+
+**Default**: `True`
+
+**TOML dotted key path**: `cli.wrap_lines`
+
+**Supported environment variables**:
+`PREFECT_CLI_WRAP_LINES`
+
+---
+## ClientMetricsSettings
+Settings for controlling metrics reporting from the client
+### `enabled`
+Whether or not to enable Prometheus metrics in the client.
+
+**Type**: `boolean`
+
+**Default**: `False`
+
+**TOML dotted key path**: `client.metrics.enabled`
+
+**Supported environment variables**:
+`PREFECT_CLIENT_METRICS_ENABLED`, `PREFECT_CLIENT_ENABLE_METRICS`
+
+### `port`
+The port to expose the client Prometheus metrics on.
+
+**Type**: `integer`
+
+**Default**: `4201`
+
+**TOML dotted key path**: `client.metrics.port`
+
+**Supported environment variables**:
+`PREFECT_CLIENT_METRICS_PORT`
+
+---
+## ClientSettings
+Settings for controlling API client behavior
+### `max_retries`
+
+ The maximum number of retries to perform on failed HTTP requests.
+ Defaults to 5. Set to 0 to disable retries.
+ See `PREFECT_CLIENT_RETRY_EXTRA_CODES` for details on which HTTP status codes are
+ retried.
+
+
+**Type**: `integer`
+
+**Default**: `5`
+
+**Constraints**:
+- Minimum: 0
+
+**TOML dotted key path**: `client.max_retries`
+
+**Supported environment variables**:
+`PREFECT_CLIENT_MAX_RETRIES`
+
+### `retry_jitter_factor`
+
+ A value greater than or equal to zero to control the amount of jitter added to retried
+ client requests. Higher values introduce larger amounts of jitter.
+ Set to 0 to disable jitter. See `clamped_poisson_interval` for details on how jitter
+ can affect retry lengths.
+
+
+**Type**: `number`
+
+**Default**: `0.2`
+
+**Constraints**:
+- Minimum: 0.0
+
+**TOML dotted key path**: `client.retry_jitter_factor`
+
+**Supported environment variables**:
+`PREFECT_CLIENT_RETRY_JITTER_FACTOR`
+
+### `retry_extra_codes`
+
+ A list of extra HTTP status codes to retry on. Defaults to an empty list.
+ 429, 502 and 503 are always retried. Please note that not all routes are idempotent and retrying
+ may result in unexpected behavior.
+
+
+**Type**: `string | integer | array | None`
+
+**TOML dotted key path**: `client.retry_extra_codes`
+
+**Supported environment variables**:
+`PREFECT_CLIENT_RETRY_EXTRA_CODES`
+
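+Because this setting accepts a single code or a list of codes, it can be written as a TOML array; a hypothetical example that additionally retries conflicts and internal server errors:
+
+```toml prefect.toml
+[client]
+retry_extra_codes = [409, 500]
+```
+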
+### `csrf_support_enabled`
+
+ Determines if CSRF token handling is active in the Prefect client for API
+ requests.
+
+ When enabled (`True`), the client automatically manages CSRF tokens by
+ retrieving, storing, and including them in applicable state-changing requests.
+
+
+**Type**: `boolean`
+
+**Default**: `True`
+
+**TOML dotted key path**: `client.csrf_support_enabled`
+
+**Supported environment variables**:
+`PREFECT_CLIENT_CSRF_SUPPORT_ENABLED`
+
+### `metrics`
+
+**Type**: [ClientMetricsSettings](#clientmetricssettings)
+
+**TOML dotted key path**: `client.metrics`
+
+---
+## CloudSettings
+Settings for interacting with Prefect Cloud
+### `api_url`
+API URL for Prefect Cloud. Used for authentication with Prefect Cloud.
+
+**Type**: `string`
+
+**Default**: `https://api.prefect.cloud/api`
+
+**TOML dotted key path**: `cloud.api_url`
+
+**Supported environment variables**:
+`PREFECT_CLOUD_API_URL`
+
+### `ui_url`
+The URL of the Prefect Cloud UI. If not set, the client will attempt to infer it.
+
+**Type**: `string | None`
+
+**Default**: `None`
+
+**TOML dotted key path**: `cloud.ui_url`
+
+**Supported environment variables**:
+`PREFECT_CLOUD_UI_URL`
+
+---
+## DeploymentsSettings
+Settings for configuring deployments defaults
+### `default_work_pool_name`
+The default work pool to use when creating deployments.
+
+**Type**: `string | None`
+
+**Default**: `None`
+
+**TOML dotted key path**: `deployments.default_work_pool_name`
+
+**Supported environment variables**:
+`PREFECT_DEPLOYMENTS_DEFAULT_WORK_POOL_NAME`, `PREFECT_DEFAULT_WORK_POOL_NAME`
+
+### `default_docker_build_namespace`
+The default Docker namespace to use when building images.
+
+**Type**: `string | None`
+
+**Default**: `None`
+
+**TOML dotted key path**: `deployments.default_docker_build_namespace`
+
+**Supported environment variables**:
+`PREFECT_DEPLOYMENTS_DEFAULT_DOCKER_BUILD_NAMESPACE`, `PREFECT_DEFAULT_DOCKER_BUILD_NAMESPACE`
+
+---
+## ExperimentsSettings
+Settings for configuring experimental features
+### `warn`
+If `True`, warn on usage of experimental features.
+
+**Type**: `boolean`
+
+**Default**: `True`
+
+**TOML dotted key path**: `experiments.warn`
+
+**Supported environment variables**:
+`PREFECT_EXPERIMENTS_WARN`, `PREFECT_EXPERIMENTAL_WARN`
+
+### `telemetry_enabled`
+Enables sending telemetry to Prefect Cloud.
+
+**Type**: `boolean`
+
+**Default**: `False`
+
+**TOML dotted key path**: `experiments.telemetry_enabled`
+
+**Supported environment variables**:
+`PREFECT_EXPERIMENTS_TELEMETRY_ENABLED`
+
+---
+## FlowsSettings
+Settings for controlling flow behavior
+### `default_retries`
+This value sets the default number of retries for all flows.
+
+**Type**: `integer`
+
+**Default**: `0`
+
+**Constraints**:
+- Minimum: 0
+
+**TOML dotted key path**: `flows.default_retries`
+
+**Supported environment variables**:
+`PREFECT_FLOWS_DEFAULT_RETRIES`, `PREFECT_FLOW_DEFAULT_RETRIES`
+
+### `default_retry_delay_seconds`
+This value sets the default retry delay seconds for all flows.
+
+**Type**: `integer | number | array`
+
+**Default**: `0`
+
+**TOML dotted key path**: `flows.default_retry_delay_seconds`
+
+**Supported environment variables**:
+`PREFECT_FLOWS_DEFAULT_RETRY_DELAY_SECONDS`, `PREFECT_FLOW_DEFAULT_RETRY_DELAY_SECONDS`
+
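+Because this setting accepts a single value or an array, a list can express increasing delays between successive retries; a sketch with arbitrary values:
+
+```toml prefect.toml
+[flows]
+default_retry_delay_seconds = [1, 10, 100]
+```
+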
+---
+## InternalSettings
+### `logging_level`
+The default logging level for Prefect's internal machinery loggers.
+
+**Type**: `string`
+
+**Default**: `ERROR`
+
+**Constraints**:
+- Allowed values: 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'
+
+**TOML dotted key path**: `internal.logging_level`
+
+**Supported environment variables**:
+`PREFECT_INTERNAL_LOGGING_LEVEL`, `PREFECT_LOGGING_INTERNAL_LEVEL`
+
+---
+## LoggingSettings
+Settings for controlling logging behavior
+### `level`
+The default logging level for Prefect loggers.
+
+**Type**: `string`
+
+**Default**: `INFO`
+
+**Constraints**:
+- Allowed values: 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'
+
+**TOML dotted key path**: `logging.level`
+
+**Supported environment variables**:
+`PREFECT_LOGGING_LEVEL`
+
+### `config_path`
+The path to a custom YAML logging configuration file.
+
+**Type**: `string | None`
+
+**Default**: `None`
+
+**TOML dotted key path**: `logging.config_path`
+
+**Supported environment variables**:
+`PREFECT_LOGGING_CONFIG_PATH`, `PREFECT_LOGGING_SETTINGS_PATH`
+
+### `extra_loggers`
+Additional loggers to attach to Prefect logging at runtime.
+
+**Type**: `string | array | None`
+
+**Default**: `None`
+
+**TOML dotted key path**: `logging.extra_loggers`
+
+**Supported environment variables**:
+`PREFECT_LOGGING_EXTRA_LOGGERS`
+
+### `log_prints`
+If `True`, `print` statements in flows and tasks will be redirected to the Prefect logger for the given run.
+
+**Type**: `boolean`
+
+**Default**: `False`
+
+**TOML dotted key path**: `logging.log_prints`
+
+**Supported environment variables**:
+`PREFECT_LOGGING_LOG_PRINTS`
+
+### `colors`
+If `True`, use colors in CLI output. If `False`, output will not include color codes.
+
+**Type**: `boolean`
+
+**Default**: `True`
+
+**TOML dotted key path**: `logging.colors`
+
+**Supported environment variables**:
+`PREFECT_LOGGING_COLORS`
+
+### `markup`
+
+ Whether to interpret strings wrapped in square brackets as a style.
+ This allows styles to be conveniently added to log messages, e.g.
+ `[red]This is a red message.[/red]`. However, the downside is, if enabled,
+ strings that contain square brackets may be inaccurately interpreted and
+ lead to incomplete output, e.g.
+ `DROP TABLE [dbo].[SomeTable];` outputs `DROP TABLE .[SomeTable];`.
+
+
+**Type**: `boolean`
+
+**Default**: `False`
+
+**TOML dotted key path**: `logging.markup`
+
+**Supported environment variables**:
+`PREFECT_LOGGING_MARKUP`
+
+### `to_api`
+
+**Type**: [LoggingToAPISettings](#loggingtoapisettings)
+
+**TOML dotted key path**: `logging.to_api`
+
+---
+## LoggingToAPISettings
+Settings for controlling logging to the API
+### `enabled`
+If `True`, logs will be sent to the API.
+
+**Type**: `boolean`
+
+**Default**: `True`
+
+**TOML dotted key path**: `logging.to_api.enabled`
+
+**Supported environment variables**:
+`PREFECT_LOGGING_TO_API_ENABLED`
+
+### `batch_interval`
+The number of seconds between batched writes of logs to the API.
+
+**Type**: `number`
+
+**Default**: `2.0`
+
+**TOML dotted key path**: `logging.to_api.batch_interval`
+
+**Supported environment variables**:
+`PREFECT_LOGGING_TO_API_BATCH_INTERVAL`
+
+### `batch_size`
+The maximum size in bytes for a batch of logs sent to the API.
+
+**Type**: `integer`
+
+**Default**: `4000000`
+
+**TOML dotted key path**: `logging.to_api.batch_size`
+
+**Supported environment variables**:
+`PREFECT_LOGGING_TO_API_BATCH_SIZE`
+
+### `max_log_size`
+The maximum size in bytes for a single log.
+
+**Type**: `integer`
+
+**Default**: `1000000`
+
+**TOML dotted key path**: `logging.to_api.max_log_size`
+
+**Supported environment variables**:
+`PREFECT_LOGGING_TO_API_MAX_LOG_SIZE`
+
+### `when_missing_flow`
+
+ Controls the behavior when loggers attempt to send logs to the API handler from outside of a flow.
+
+ All logs sent to the API must be associated with a flow run. The API log handler can
+ only be used outside of a flow by manually providing a flow run identifier. Logs
+ that are not associated with a flow run will not be sent to the API. This setting can
+ be used to determine if a warning or error is displayed when the identifier is missing.
+
+ The following options are available:
+
+ - "warn": Log a warning message.
+ - "error": Raise an error.
+ - "ignore": Do not log a warning message or raise an error.
+
+
+**Type**: `string`
+
+**Default**: `warn`
+
+**Constraints**:
+- Allowed values: 'warn', 'error', 'ignore'
+
+**TOML dotted key path**: `logging.to_api.when_missing_flow`
+
+**Supported environment variables**:
+`PREFECT_LOGGING_TO_API_WHEN_MISSING_FLOW`
+
+---
+## ResultsSettings
+Settings for controlling result storage behavior
+### `default_serializer`
+The default serializer to use when not otherwise specified.
+
+**Type**: `string`
+
+**Default**: `pickle`
+
+**TOML dotted key path**: `results.default_serializer`
+
+**Supported environment variables**:
+`PREFECT_RESULTS_DEFAULT_SERIALIZER`
+
+### `persist_by_default`
+The default setting for persisting results when not otherwise specified.
+
+**Type**: `boolean`
+
+**Default**: `False`
+
+**TOML dotted key path**: `results.persist_by_default`
+
+**Supported environment variables**:
+`PREFECT_RESULTS_PERSIST_BY_DEFAULT`
+
+### `default_storage_block`
+The `block-type/block-document` slug of a block to use as the default result storage.
+
+**Type**: `string | None`
+
+**Default**: `None`
+
+**TOML dotted key path**: `results.default_storage_block`
+
+**Supported environment variables**:
+`PREFECT_RESULTS_DEFAULT_STORAGE_BLOCK`, `PREFECT_DEFAULT_RESULT_STORAGE_BLOCK`
+
+### `local_storage_path`
+The path to a directory to store results in.
+
+**Type**: `string | None`
+
+**Default**: `None`
+
+**TOML dotted key path**: `results.local_storage_path`
+
+**Supported environment variables**:
+`PREFECT_RESULTS_LOCAL_STORAGE_PATH`, `PREFECT_LOCAL_STORAGE_PATH`
+
+---
+## RunnerServerSettings
+Settings for controlling runner server behavior
+### `enable`
+Whether or not to enable the runner's webserver.
+
+**Type**: `boolean`
+
+**Default**: `False`
+
+**TOML dotted key path**: `runner.server.enable`
+
+**Supported environment variables**:
+`PREFECT_RUNNER_SERVER_ENABLE`
+
+### `host`
+The host address the runner's webserver should bind to.
+
+**Type**: `string`
+
+**Default**: `localhost`
+
+**TOML dotted key path**: `runner.server.host`
+
+**Supported environment variables**:
+`PREFECT_RUNNER_SERVER_HOST`
+
+### `port`
+The port the runner's webserver should bind to.
+
+**Type**: `integer`
+
+**Default**: `8080`
+
+**TOML dotted key path**: `runner.server.port`
+
+**Supported environment variables**:
+`PREFECT_RUNNER_SERVER_PORT`
+
+### `log_level`
+The log level of the runner's webserver.
+
+**Type**: `string`
+
+**Default**: `error`
+
+**Constraints**:
+- Allowed values: 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'
+
+**TOML dotted key path**: `runner.server.log_level`
+
+**Supported environment variables**:
+`PREFECT_RUNNER_SERVER_LOG_LEVEL`
+
+### `missed_polls_tolerance`
+Number of missed polls before a runner is considered unhealthy by its webserver.
+
+**Type**: `integer`
+
+**Default**: `2`
+
+**TOML dotted key path**: `runner.server.missed_polls_tolerance`
+
+**Supported environment variables**:
+`PREFECT_RUNNER_SERVER_MISSED_POLLS_TOLERANCE`
+
+---
+## RunnerSettings
+Settings for controlling runner behavior
+### `process_limit`
+Maximum number of processes a runner will execute in parallel.
+
+**Type**: `integer`
+
+**Default**: `5`
+
+**TOML dotted key path**: `runner.process_limit`
+
+**Supported environment variables**:
+`PREFECT_RUNNER_PROCESS_LIMIT`
+
+### `poll_frequency`
+Number of seconds a runner should wait between queries for scheduled work.
+
+**Type**: `integer`
+
+**Default**: `10`
+
+**TOML dotted key path**: `runner.poll_frequency`
+
+**Supported environment variables**:
+`PREFECT_RUNNER_POLL_FREQUENCY`
+
+### `server`
+
+**Type**: [RunnerServerSettings](#runnerserversettings)
+
+**TOML dotted key path**: `runner.server`
+
+---
+## ServerAPISettings
+Settings for controlling API server behavior
+### `host`
+The API's host address (defaults to `127.0.0.1`).
+
+**Type**: `string`
+
+**Default**: `127.0.0.1`
+
+**TOML dotted key path**: `server.api.host`
+
+**Supported environment variables**:
+`PREFECT_SERVER_API_HOST`
+
+### `port`
+The API's port address (defaults to `4200`).
+
+**Type**: `integer`
+
+**Default**: `4200`
+
+**TOML dotted key path**: `server.api.port`
+
+**Supported environment variables**:
+`PREFECT_SERVER_API_PORT`
+
+### `default_limit`
+The default limit applied to queries that can return multiple objects, such as `POST /flow_runs/filter`.
+
+**Type**: `integer`
+
+**Default**: `200`
+
+**TOML dotted key path**: `server.api.default_limit`
+
+**Supported environment variables**:
+`PREFECT_SERVER_API_DEFAULT_LIMIT`, `PREFECT_API_DEFAULT_LIMIT`
+
+### `keepalive_timeout`
+
+ The API's keep alive timeout (defaults to `5`).
+ Refer to https://www.uvicorn.org/settings/#timeouts for details.
+
+ When the API is hosted behind a load balancer, you may want to set this to a value
+ greater than the load balancer's idle timeout.
+
+ Note this setting only applies when calling `prefect server start`; if hosting the
+ API with another tool you will need to configure this there instead.
+
+
+**Type**: `integer`
+
+**Default**: `5`
+
+**TOML dotted key path**: `server.api.keepalive_timeout`
+
+**Supported environment variables**:
+`PREFECT_SERVER_API_KEEPALIVE_TIMEOUT`
+
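+As a sketch, a server started with `prefect server start` behind a load balancer with, say, a 60-second idle timeout might raise the keep-alive timeout above it (the value is illustrative):
+
+```toml prefect.toml
+[server.api]
+keepalive_timeout = 75
+```
+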
+### `csrf_protection_enabled`
+
+ Controls the activation of CSRF protection for the Prefect server API.
+
+ When enabled (`True`), the server enforces CSRF validation checks on incoming
+ state-changing requests (POST, PUT, PATCH, DELETE), requiring a valid CSRF
+ token to be included in the request headers or body. This adds a layer of
+ security by preventing unauthorized or malicious sites from making requests on
+ behalf of authenticated users.
+
+ It is recommended to enable this setting in production environments where the
+ API is exposed to web clients to safeguard against CSRF attacks.
+
+ Note: Enabling this setting requires corresponding support in the client for
+ CSRF token management. See PREFECT_CLIENT_CSRF_SUPPORT_ENABLED for more.
+
+
+**Type**: `boolean`
+
+**Default**: `False`
+
+**TOML dotted key path**: `server.api.csrf_protection_enabled`
+
+**Supported environment variables**:
+`PREFECT_SERVER_API_CSRF_PROTECTION_ENABLED`, `PREFECT_SERVER_CSRF_PROTECTION_ENABLED`
+
+### `csrf_token_expiration`
+
+ Specifies the duration for which a CSRF token remains valid after being issued
+ by the server.
+
+ The default expiration time is set to 1 hour, which offers a reasonable
+ compromise. Adjust this setting based on your specific security requirements
+ and usage patterns.
+
+
+**Type**: `string`
+
+**Default**: `PT1H`
+
+**TOML dotted key path**: `server.api.csrf_token_expiration`
+
+**Supported environment variables**:
+`PREFECT_SERVER_API_CSRF_TOKEN_EXPIRATION`, `PREFECT_SERVER_CSRF_TOKEN_EXPIRATION`
+
+### `cors_allowed_origins`
+
+ A comma-separated list of origins that are authorized to make cross-origin requests to the API.
+
+ By default, this is set to `*`, which allows requests from all origins.
+
+
+**Type**: `string`
+
+**Default**: `*`
+
+**TOML dotted key path**: `server.api.cors_allowed_origins`
+
+**Supported environment variables**:
+`PREFECT_SERVER_API_CORS_ALLOWED_ORIGINS`, `PREFECT_SERVER_CORS_ALLOWED_ORIGINS`
+
+### `cors_allowed_methods`
+
+ A comma-separated list of HTTP methods that are allowed during cross-origin requests to the API.
+
+ By default, this is set to `*`, which allows all methods.
+
+
+**Type**: `string`
+
+**Default**: `*`
+
+**TOML dotted key path**: `server.api.cors_allowed_methods`
+
+**Supported environment variables**:
+`PREFECT_SERVER_API_CORS_ALLOWED_METHODS`, `PREFECT_SERVER_CORS_ALLOWED_METHODS`
+
+### `cors_allowed_headers`
+
+ A comma-separated list of headers that are allowed during cross-origin requests to the API.
+
+ By default, this is set to `*`, which allows all headers.
+
+
+**Type**: `string`
+
+**Default**: `*`
+
+**TOML dotted key path**: `server.api.cors_allowed_headers`
+
+**Supported environment variables**:
+`PREFECT_SERVER_API_CORS_ALLOWED_HEADERS`, `PREFECT_SERVER_CORS_ALLOWED_HEADERS`
+
+---
+## ServerDatabaseSettings
+Settings for controlling server database behavior
+### `connection_url`
+
+ A database connection URL in a SQLAlchemy-compatible
+ format. Prefect currently supports SQLite and Postgres. Note that all
+ Prefect database engines must use an async driver - for SQLite, use
+ `sqlite+aiosqlite` and for Postgres use `postgresql+asyncpg`.
+
+ SQLite in-memory databases can be used by providing the url
+ `sqlite+aiosqlite:///file::memory:?cache=shared&uri=true&check_same_thread=false`,
+ which will allow the database to be accessed by multiple threads. Note
+ that in-memory databases cannot be accessed from multiple processes and
+ should only be used for simple tests.
+
+
+**Type**: `string | None`
+
+**Default**: `None`
+
+**TOML dotted key path**: `server.database.connection_url`
+
+**Supported environment variables**:
+`PREFECT_SERVER_DATABASE_CONNECTION_URL`, `PREFECT_API_DATABASE_CONNECTION_URL`
+
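+For example, a hypothetical Postgres configuration using the required async driver (host and credentials are placeholders):
+
+```toml prefect.toml
+[server.database]
+connection_url = "postgresql+asyncpg://postgres:<password>@localhost:5432/prefect"
+```
+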
+### `driver`
+The database driver to use when connecting to the database. If not set, the driver will be inferred from the connection URL.
+
+**Type**: `string | None`
+
+**Default**: `None`
+
+**TOML dotted key path**: `server.database.driver`
+
+**Supported environment variables**:
+`PREFECT_SERVER_DATABASE_DRIVER`, `PREFECT_API_DATABASE_DRIVER`
+
+### `host`
+The database server host.
+
+**Type**: `string | None`
+
+**Default**: `None`
+
+**TOML dotted key path**: `server.database.host`
+
+**Supported environment variables**:
+`PREFECT_SERVER_DATABASE_HOST`, `PREFECT_API_DATABASE_HOST`
+
+### `port`
+The database server port.
+
+**Type**: `integer | None`
+
+**Default**: `None`
+
+**TOML dotted key path**: `server.database.port`
+
+**Supported environment variables**:
+`PREFECT_SERVER_DATABASE_PORT`, `PREFECT_API_DATABASE_PORT`
+
+### `user`
+The user to use when connecting to the database.
+
+**Type**: `string | None`
+
+**Default**: `None`
+
+**TOML dotted key path**: `server.database.user`
+
+**Supported environment variables**:
+`PREFECT_SERVER_DATABASE_USER`, `PREFECT_API_DATABASE_USER`
+
+### `name`
+The name of the Prefect database on the remote server, or the path to the database file for SQLite.
+
+**Type**: `string | None`
+
+**Default**: `None`
+
+**TOML dotted key path**: `server.database.name`
+
+**Supported environment variables**:
+`PREFECT_SERVER_DATABASE_NAME`, `PREFECT_API_DATABASE_NAME`
+
+### `password`
+The password to use when connecting to the database. Should be kept secret.
+
+**Type**: `string | None`
+
+**Default**: `None`
+
+**TOML dotted key path**: `server.database.password`
+
+**Supported environment variables**:
+`PREFECT_SERVER_DATABASE_PASSWORD`, `PREFECT_API_DATABASE_PASSWORD`
+
+### `echo`
+If `True`, SQLAlchemy will log all SQL issued to the database. Defaults to `False`.
+
+**Type**: `boolean`
+
+**Default**: `False`
+
+**TOML dotted key path**: `server.database.echo`
+
+**Supported environment variables**:
+`PREFECT_SERVER_DATABASE_ECHO`, `PREFECT_API_DATABASE_ECHO`
+
+### `migrate_on_start`
+If `True`, the database will be migrated on application startup.
+
+**Type**: `boolean`
+
+**Default**: `True`
+
+**TOML dotted key path**: `server.database.migrate_on_start`
+
+**Supported environment variables**:
+`PREFECT_SERVER_DATABASE_MIGRATE_ON_START`, `PREFECT_API_DATABASE_MIGRATE_ON_START`
+
+### `timeout`
+A statement timeout, in seconds, applied to all database interactions made by the API. Defaults to 10 seconds.
+
+**Type**: `number | None`
+
+**Default**: `10.0`
+
+**TOML dotted key path**: `server.database.timeout`
+
+**Supported environment variables**:
+`PREFECT_SERVER_DATABASE_TIMEOUT`, `PREFECT_API_DATABASE_TIMEOUT`
+
+### `connection_timeout`
+A connection timeout, in seconds, applied to database connections. Defaults to `5`.
+
+**Type**: `number | None`
+
+**Default**: `5`
+
+**TOML dotted key path**: `server.database.connection_timeout`
+
+**Supported environment variables**:
+`PREFECT_SERVER_DATABASE_CONNECTION_TIMEOUT`, `PREFECT_API_DATABASE_CONNECTION_TIMEOUT`
+
+### `sqlalchemy_pool_size`
+Controls connection pool size when using a PostgreSQL database with the Prefect API. If not set, the default SQLAlchemy pool size will be used.
+
+**Type**: `integer | None`
+
+**Default**: `None`
+
+**TOML dotted key path**: `server.database.sqlalchemy_pool_size`
+
+**Supported environment variables**:
+`PREFECT_SERVER_DATABASE_SQLALCHEMY_POOL_SIZE`, `PREFECT_SQLALCHEMY_POOL_SIZE`
+
+### `sqlalchemy_max_overflow`
+Controls maximum overflow of the connection pool when using a PostgreSQL database with the Prefect API. If not set, the default SQLAlchemy maximum overflow value will be used.
+
+**Type**: `integer | None`
+
+**Default**: `None`
+
+**TOML dotted key path**: `server.database.sqlalchemy_max_overflow`
+
+**Supported environment variables**:
+`PREFECT_SERVER_DATABASE_SQLALCHEMY_MAX_OVERFLOW`, `PREFECT_SQLALCHEMY_MAX_OVERFLOW`
+
+---
+## ServerDeploymentsSettings
+### `concurrency_slot_wait_seconds`
+The number of seconds to wait before retrying when a deployment flow run cannot secure a concurrency slot from the server.
+
+**Type**: `number`
+
+**Default**: `30.0`
+
+**Constraints**:
+- Minimum: 0.0
+
+**TOML dotted key path**: `server.deployments.concurrency_slot_wait_seconds`
+
+**Supported environment variables**:
+`PREFECT_SERVER_DEPLOYMENTS_CONCURRENCY_SLOT_WAIT_SECONDS`, `PREFECT_DEPLOYMENT_CONCURRENCY_SLOT_WAIT_SECONDS`
+
+---
+## ServerEphemeralSettings
+Settings for controlling ephemeral server behavior
+### `enabled`
+
+ Controls whether or not a subprocess server can be started when no API URL is provided.
+
+
+**Type**: `boolean`
+
+**Default**: `False`
+
+**TOML dotted key path**: `server.ephemeral.enabled`
+
+**Supported environment variables**:
+`PREFECT_SERVER_EPHEMERAL_ENABLED`, `PREFECT_SERVER_ALLOW_EPHEMERAL_MODE`
+
+### `startup_timeout_seconds`
+
+ The number of seconds to wait for the server to start when ephemeral mode is enabled.
+ Defaults to `20`.
+
+
+**Type**: `integer`
+
+**Default**: `20`
+
+**TOML dotted key path**: `server.ephemeral.startup_timeout_seconds`
+
+**Supported environment variables**:
+`PREFECT_SERVER_EPHEMERAL_STARTUP_TIMEOUT_SECONDS`
+
+---
+## ServerEventsSettings
+Settings for controlling behavior of the events subsystem
+### `stream_out_enabled`
+Whether or not to stream events out to the API via websockets.
+
+**Type**: `boolean`
+
+**Default**: `True`
+
+**TOML dotted key path**: `server.events.stream_out_enabled`
+
+**Supported environment variables**:
+`PREFECT_SERVER_EVENTS_STREAM_OUT_ENABLED`, `PREFECT_API_EVENTS_STREAM_OUT_ENABLED`
+
+### `related_resource_cache_ttl`
+The number of seconds to cache related resources for in the API.
+
+**Type**: `string`
+
+**Default**: `PT5M`
+
+**TOML dotted key path**: `server.events.related_resource_cache_ttl`
+
+**Supported environment variables**:
+`PREFECT_SERVER_EVENTS_RELATED_RESOURCE_CACHE_TTL`, `PREFECT_API_EVENTS_RELATED_RESOURCE_CACHE_TTL`
+
+### `maximum_labels_per_resource`
+The maximum number of labels a resource may have.
+
+**Type**: `integer`
+
+**Default**: `500`
+
+**TOML dotted key path**: `server.events.maximum_labels_per_resource`
+
+**Supported environment variables**:
+`PREFECT_SERVER_EVENTS_MAXIMUM_LABELS_PER_RESOURCE`, `PREFECT_EVENTS_MAXIMUM_LABELS_PER_RESOURCE`
+
+### `maximum_related_resources`
+The maximum number of related resources an Event may have.
+
+**Type**: `integer`
+
+**Default**: `500`
+
+**TOML dotted key path**: `server.events.maximum_related_resources`
+
+**Supported environment variables**:
+`PREFECT_SERVER_EVENTS_MAXIMUM_RELATED_RESOURCES`, `PREFECT_EVENTS_MAXIMUM_RELATED_RESOURCES`
+
+### `maximum_size_bytes`
+The maximum size of an Event when serialized to JSON
+
+**Type**: `integer`
+
+**Default**: `1500000`
+
+**TOML dotted key path**: `server.events.maximum_size_bytes`
+
+**Supported environment variables**:
+`PREFECT_SERVER_EVENTS_MAXIMUM_SIZE_BYTES`, `PREFECT_EVENTS_MAXIMUM_SIZE_BYTES`
+
+### `expired_bucket_buffer`
+The amount of time to retain expired automation buckets
+
+**Type**: `string`
+
+**Default**: `PT1M`
+
+**TOML dotted key path**: `server.events.expired_bucket_buffer`
+
+**Supported environment variables**:
+`PREFECT_SERVER_EVENTS_EXPIRED_BUCKET_BUFFER`, `PREFECT_EVENTS_EXPIRED_BUCKET_BUFFER`
+
+### `proactive_granularity`
+How frequently proactive automations are evaluated
+
+**Type**: `string`
+
+**Default**: `PT5S`
+
+**TOML dotted key path**: `server.events.proactive_granularity`
+
+**Supported environment variables**:
+`PREFECT_SERVER_EVENTS_PROACTIVE_GRANULARITY`, `PREFECT_EVENTS_PROACTIVE_GRANULARITY`
+
+### `retention_period`
+The amount of time to retain events in the database.
+
+**Type**: `string`
+
+**Default**: `P7D`
+
+**TOML dotted key path**: `server.events.retention_period`
+
+**Supported environment variables**:
+`PREFECT_SERVER_EVENTS_RETENTION_PERIOD`, `PREFECT_EVENTS_RETENTION_PERIOD`
+
+### `maximum_websocket_backfill`
+The maximum range to look back for backfilling events for a websocket subscriber.
+
+**Type**: `string`
+
+**Default**: `PT15M`
+
+**TOML dotted key path**: `server.events.maximum_websocket_backfill`
+
+**Supported environment variables**:
+`PREFECT_SERVER_EVENTS_MAXIMUM_WEBSOCKET_BACKFILL`, `PREFECT_EVENTS_MAXIMUM_WEBSOCKET_BACKFILL`
+
+### `websocket_backfill_page_size`
+The page size for the queries to backfill events for websocket subscribers.
+
+**Type**: `integer`
+
+**Default**: `250`
+
+**TOML dotted key path**: `server.events.websocket_backfill_page_size`
+
+**Supported environment variables**:
+`PREFECT_SERVER_EVENTS_WEBSOCKET_BACKFILL_PAGE_SIZE`, `PREFECT_EVENTS_WEBSOCKET_BACKFILL_PAGE_SIZE`
+
+### `messaging_broker`
+Which message broker implementation to use for the messaging system. Should point to a module that exports a Publisher and Consumer class.
+
+**Type**: `string`
+
+**Default**: `prefect.server.utilities.messaging.memory`
+
+**TOML dotted key path**: `server.events.messaging_broker`
+
+**Supported environment variables**:
+`PREFECT_SERVER_EVENTS_MESSAGING_BROKER`, `PREFECT_MESSAGING_BROKER`
+
+### `messaging_cache`
+Which cache implementation to use for the events system. Should point to a module that exports a Cache class.
+
+**Type**: `string`
+
+**Default**: `prefect.server.utilities.messaging.memory`
+
+**TOML dotted key path**: `server.events.messaging_cache`
+
+**Supported environment variables**:
+`PREFECT_SERVER_EVENTS_MESSAGING_CACHE`, `PREFECT_MESSAGING_CACHE`
+
+---
+## ServerFlowRunGraphSettings
+Settings for controlling behavior of the flow run graph
+### `max_nodes`
+The maximum size of a flow run graph on the v2 API
+
+**Type**: `integer`
+
+**Default**: `10000`
+
+**TOML dotted key path**: `server.flow_run_graph.max_nodes`
+
+**Supported environment variables**:
+`PREFECT_SERVER_FLOW_RUN_GRAPH_MAX_NODES`, `PREFECT_API_MAX_FLOW_RUN_GRAPH_NODES`
+
+### `max_artifacts`
+The maximum number of artifacts to show on a flow run graph on the v2 API
+
+**Type**: `integer`
+
+**Default**: `10000`
+
+**TOML dotted key path**: `server.flow_run_graph.max_artifacts`
+
+**Supported environment variables**:
+`PREFECT_SERVER_FLOW_RUN_GRAPH_MAX_ARTIFACTS`, `PREFECT_API_MAX_FLOW_RUN_GRAPH_ARTIFACTS`
+
+---
+## ServerServicesCancellationCleanupSettings
+Settings for controlling the cancellation cleanup service
+### `enabled`
+Whether or not to start the cancellation cleanup service in the server application.
+
+**Type**: `boolean`
+
+**Default**: `True`
+
+**TOML dotted key path**: `server.services.cancellation_cleanup.enabled`
+
+**Supported environment variables**:
+`PREFECT_SERVER_SERVICES_CANCELLATION_CLEANUP_ENABLED`, `PREFECT_API_SERVICES_CANCELLATION_CLEANUP_ENABLED`
+
+### `loop_seconds`
+The cancellation cleanup service will look for non-terminal tasks and subflows this often. Defaults to `20`.
+
+**Type**: `number`
+
+**Default**: `20`
+
+**TOML dotted key path**: `server.services.cancellation_cleanup.loop_seconds`
+
+**Supported environment variables**:
+`PREFECT_SERVER_SERVICES_CANCELLATION_CLEANUP_LOOP_SECONDS`, `PREFECT_API_SERVICES_CANCELLATION_CLEANUP_LOOP_SECONDS`
+
+---
+## ServerServicesEventPersisterSettings
+Settings for controlling the event persister service
+### `enabled`
+Whether or not to start the event persister service in the server application.
+
+**Type**: `boolean`
+
+**Default**: `True`
+
+**TOML dotted key path**: `server.services.event_persister.enabled`
+
+**Supported environment variables**:
+`PREFECT_SERVER_SERVICES_EVENT_PERSISTER_ENABLED`, `PREFECT_API_SERVICES_EVENT_PERSISTER_ENABLED`
+
+### `batch_size`
+The number of events the event persister will attempt to insert in one batch.
+
+**Type**: `integer`
+
+**Default**: `20`
+
+**TOML dotted key path**: `server.services.event_persister.batch_size`
+
+**Supported environment variables**:
+`PREFECT_SERVER_SERVICES_EVENT_PERSISTER_BATCH_SIZE`, `PREFECT_API_SERVICES_EVENT_PERSISTER_BATCH_SIZE`
+
+### `flush_interval`
+The maximum number of seconds between flushes of the event persister.
+
+**Type**: `number`
+
+**Default**: `5`
+
+**TOML dotted key path**: `server.services.event_persister.flush_interval`
+
+**Supported environment variables**:
+`PREFECT_SERVER_SERVICES_EVENT_PERSISTER_FLUSH_INTERVAL`, `PREFECT_API_SERVICES_EVENT_PERSISTER_FLUSH_INTERVAL`
+
+---
+## ServerServicesFlowRunNotificationsSettings
+Settings for controlling the flow run notifications service
+### `enabled`
+Whether or not to start the flow run notifications service in the server application.
+
+**Type**: `boolean`
+
+**Default**: `True`
+
+**TOML dotted key path**: `server.services.flow_run_notifications.enabled`
+
+**Supported environment variables**:
+`PREFECT_SERVER_SERVICES_FLOW_RUN_NOTIFICATIONS_ENABLED`, `PREFECT_API_SERVICES_FLOW_RUN_NOTIFICATIONS_ENABLED`
+
+---
+## ServerServicesForemanSettings
+Settings for controlling the foreman service
+### `enabled`
+Whether or not to start the foreman service in the server application.
+
+**Type**: `boolean`
+
+**Default**: `True`
+
+**TOML dotted key path**: `server.services.foreman.enabled`
+
+**Supported environment variables**:
+`PREFECT_SERVER_SERVICES_FOREMAN_ENABLED`, `PREFECT_API_SERVICES_FOREMAN_ENABLED`
+
+### `loop_seconds`
+The foreman service will check for offline workers this often. Defaults to `15`.
+
+**Type**: `number`
+
+**Default**: `15`
+
+**TOML dotted key path**: `server.services.foreman.loop_seconds`
+
+**Supported environment variables**:
+`PREFECT_SERVER_SERVICES_FOREMAN_LOOP_SECONDS`, `PREFECT_API_SERVICES_FOREMAN_LOOP_SECONDS`
+
+### `inactivity_heartbeat_multiple`
+
+ The number of heartbeats that must be missed before a worker is marked as offline. Defaults to `3`.
+
+
+**Type**: `integer`
+
+**Default**: `3`
+
+**TOML dotted key path**: `server.services.foreman.inactivity_heartbeat_multiple`
+
+**Supported environment variables**:
+`PREFECT_SERVER_SERVICES_FOREMAN_INACTIVITY_HEARTBEAT_MULTIPLE`, `PREFECT_API_SERVICES_FOREMAN_INACTIVITY_HEARTBEAT_MULTIPLE`
+
+### `fallback_heartbeat_interval_seconds`
+
+ The number of seconds to use for online/offline evaluation if a worker's heartbeat
+ interval is not set. Defaults to `30`.
+
+
+**Type**: `integer`
+
+**Default**: `30`
+
+**TOML dotted key path**: `server.services.foreman.fallback_heartbeat_interval_seconds`
+
+**Supported environment variables**:
+`PREFECT_SERVER_SERVICES_FOREMAN_FALLBACK_HEARTBEAT_INTERVAL_SECONDS`, `PREFECT_API_SERVICES_FOREMAN_FALLBACK_HEARTBEAT_INTERVAL_SECONDS`
+
+### `deployment_last_polled_timeout_seconds`
+
+ The number of seconds before a deployment is marked as not ready if it has not been
+ polled. Defaults to `60`.
+
+
+**Type**: `integer`
+
+**Default**: `60`
+
+**TOML dotted key path**: `server.services.foreman.deployment_last_polled_timeout_seconds`
+
+**Supported environment variables**:
+`PREFECT_SERVER_SERVICES_FOREMAN_DEPLOYMENT_LAST_POLLED_TIMEOUT_SECONDS`, `PREFECT_API_SERVICES_FOREMAN_DEPLOYMENT_LAST_POLLED_TIMEOUT_SECONDS`
+
+### `work_queue_last_polled_timeout_seconds`
+
+ The number of seconds before a work queue is marked as not ready if it has not been
+ polled. Defaults to `60`.
+
+
+**Type**: `integer`
+
+**Default**: `60`
+
+**TOML dotted key path**: `server.services.foreman.work_queue_last_polled_timeout_seconds`
+
+**Supported environment variables**:
+`PREFECT_SERVER_SERVICES_FOREMAN_WORK_QUEUE_LAST_POLLED_TIMEOUT_SECONDS`, `PREFECT_API_SERVICES_FOREMAN_WORK_QUEUE_LAST_POLLED_TIMEOUT_SECONDS`
+
+---
+## ServerServicesLateRunsSettings
+Settings for controlling the late runs service
+### `enabled`
+Whether or not to start the late runs service in the server application.
+
+**Type**: `boolean`
+
+**Default**: `True`
+
+**TOML dotted key path**: `server.services.late_runs.enabled`
+
+**Supported environment variables**:
+`PREFECT_SERVER_SERVICES_LATE_RUNS_ENABLED`, `PREFECT_API_SERVICES_LATE_RUNS_ENABLED`
+
+### `loop_seconds`
+
+ The late runs service will look for runs to mark as late this often. Defaults to `5`.
+
+
+**Type**: `number`
+
+**Default**: `5`
+
+**TOML dotted key path**: `server.services.late_runs.loop_seconds`
+
+**Supported environment variables**:
+`PREFECT_SERVER_SERVICES_LATE_RUNS_LOOP_SECONDS`, `PREFECT_API_SERVICES_LATE_RUNS_LOOP_SECONDS`
+
+### `after_seconds`
+
+ The late runs service will mark runs as late after they have exceeded their scheduled start time by this many seconds. Defaults to `15` seconds.
+
+
+**Type**: `string`
+
+**Default**: `PT15S`
+
+**TOML dotted key path**: `server.services.late_runs.after_seconds`
+
+**Supported environment variables**:
+`PREFECT_SERVER_SERVICES_LATE_RUNS_AFTER_SECONDS`, `PREFECT_API_SERVICES_LATE_RUNS_AFTER_SECONDS`
+
+---
+## ServerServicesPauseExpirationsSettings
+Settings for controlling the pause expiration service
+### `enabled`
+
+ Whether or not to start the paused flow run expiration service in the server
+ application. If disabled, paused flows that have timed out will remain in a Paused state
+ until a resume attempt.
+
+
+**Type**: `boolean`
+
+**Default**: `True`
+
+**TOML dotted key path**: `server.services.pause_expirations.enabled`
+
+**Supported environment variables**:
+`PREFECT_SERVER_SERVICES_PAUSE_EXPIRATIONS_ENABLED`, `PREFECT_API_SERVICES_PAUSE_EXPIRATIONS_ENABLED`
+
+### `loop_seconds`
+
+ The pause expiration service will look for runs to mark as failed this often. Defaults to `5`.
+
+
+**Type**: `number`
+
+**Default**: `5`
+
+**TOML dotted key path**: `server.services.pause_expirations.loop_seconds`
+
+**Supported environment variables**:
+`PREFECT_SERVER_SERVICES_PAUSE_EXPIRATIONS_LOOP_SECONDS`, `PREFECT_API_SERVICES_PAUSE_EXPIRATIONS_LOOP_SECONDS`
+
+---
+## ServerServicesSchedulerSettings
+Settings for controlling the scheduler service
+### `enabled`
+Whether or not to start the scheduler service in the server application.
+
+**Type**: `boolean`
+
+**Default**: `True`
+
+**TOML dotted key path**: `server.services.scheduler.enabled`
+
+**Supported environment variables**:
+`PREFECT_SERVER_SERVICES_SCHEDULER_ENABLED`, `PREFECT_API_SERVICES_SCHEDULER_ENABLED`
+
+### `loop_seconds`
+
+ The scheduler loop interval, in seconds. This determines
+ how often the scheduler will attempt to schedule new flow runs, but has no
+ impact on how quickly either flow runs or task runs are actually executed.
+ Defaults to `60`.
+
+
+**Type**: `number`
+
+**Default**: `60`
+
+**TOML dotted key path**: `server.services.scheduler.loop_seconds`
+
+**Supported environment variables**:
+`PREFECT_SERVER_SERVICES_SCHEDULER_LOOP_SECONDS`, `PREFECT_API_SERVICES_SCHEDULER_LOOP_SECONDS`
+
+### `deployment_batch_size`
+
+ The number of deployments the scheduler will attempt to
+ schedule in a single batch. If there are more deployments than the batch
+ size, the scheduler immediately attempts to schedule the next batch; it
+ does not sleep for `scheduler_loop_seconds` until it has visited every
+ deployment once. Defaults to `100`.
+
+
+**Type**: `integer`
+
+**Default**: `100`
+
+**TOML dotted key path**: `server.services.scheduler.deployment_batch_size`
+
+**Supported environment variables**:
+`PREFECT_SERVER_SERVICES_SCHEDULER_DEPLOYMENT_BATCH_SIZE`, `PREFECT_API_SERVICES_SCHEDULER_DEPLOYMENT_BATCH_SIZE`
+
+### `max_runs`
+
+ The scheduler will attempt to schedule up to this many
+ auto-scheduled runs in the future. Note that deployments may have fewer than
+ this many scheduled runs, depending on the value of
+ `scheduler_max_scheduled_time`. Defaults to `100`.
+
+
+**Type**: `integer`
+
+**Default**: `100`
+
+**TOML dotted key path**: `server.services.scheduler.max_runs`
+
+**Supported environment variables**:
+`PREFECT_SERVER_SERVICES_SCHEDULER_MAX_RUNS`, `PREFECT_API_SERVICES_SCHEDULER_MAX_RUNS`
+
+### `min_runs`
+
+ The scheduler will attempt to schedule at least this many
+ auto-scheduled runs in the future. Note that deployments may have more than
+ this many scheduled runs, depending on the value of
+ `scheduler_min_scheduled_time`. Defaults to `3`.
+
+
+**Type**: `integer`
+
+**Default**: `3`
+
+**TOML dotted key path**: `server.services.scheduler.min_runs`
+
+**Supported environment variables**:
+`PREFECT_SERVER_SERVICES_SCHEDULER_MIN_RUNS`, `PREFECT_API_SERVICES_SCHEDULER_MIN_RUNS`
+
+### `max_scheduled_time`
+
+ The scheduler will create new runs up to this far in the
+ future. Note that this setting will take precedence over
+ `scheduler_max_runs`: if a flow runs once a month and
+ `scheduler_max_scheduled_time` is three months, then only three runs will be
+ scheduled. Defaults to 100 days (`8640000` seconds).
+
+
+**Type**: `string`
+
+**Default**: `P100D`
+
+**TOML dotted key path**: `server.services.scheduler.max_scheduled_time`
+
+**Supported environment variables**:
+`PREFECT_SERVER_SERVICES_SCHEDULER_MAX_SCHEDULED_TIME`, `PREFECT_API_SERVICES_SCHEDULER_MAX_SCHEDULED_TIME`
+
+### `min_scheduled_time`
+
+ The scheduler will create new runs at least this far in the
+ future. Note that this setting will take precedence over `scheduler_min_runs`:
+ if a flow runs every hour and `scheduler_min_scheduled_time` is three hours,
+ then three runs will be scheduled even if `scheduler_min_runs` is 1. Defaults to `1` hour.
+
+
+**Type**: `string`
+
+**Default**: `PT1H`
+
+**TOML dotted key path**: `server.services.scheduler.min_scheduled_time`
+
+**Supported environment variables**:
+`PREFECT_SERVER_SERVICES_SCHEDULER_MIN_SCHEDULED_TIME`, `PREFECT_API_SERVICES_SCHEDULER_MIN_SCHEDULED_TIME`
+
+### `insert_batch_size`
+
+ The number of runs the scheduler will attempt to insert in a single batch.
+ Defaults to `500`.
+
+
+**Type**: `integer`
+
+**Default**: `500`
+
+**TOML dotted key path**: `server.services.scheduler.insert_batch_size`
+
+**Supported environment variables**:
+`PREFECT_SERVER_SERVICES_SCHEDULER_INSERT_BATCH_SIZE`, `PREFECT_API_SERVICES_SCHEDULER_INSERT_BATCH_SIZE`
+
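+As an illustrative sketch, several scheduler settings can be tuned together under their shared TOML table (the values below are arbitrary, not recommendations):
+
+```toml prefect.toml
+[server.services.scheduler]
+loop_seconds = 30
+max_runs = 50
+max_scheduled_time = "P30D"
+```
+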
+---
+## ServerServicesSettings
+Settings for controlling server services
+### `cancellation_cleanup`
+
+**Type**: [ServerServicesCancellationCleanupSettings](#serverservicescancellationcleanupsettings)
+
+**TOML dotted key path**: `server.services.cancellation_cleanup`
+
+### `event_persister`
+
+**Type**: [ServerServicesEventPersisterSettings](#serverserviceseventpersistersettings)
+
+**TOML dotted key path**: `server.services.event_persister`
+
+### `flow_run_notifications`
+
+**Type**: [ServerServicesFlowRunNotificationsSettings](#serverservicesflowrunnotificationssettings)
+
+**TOML dotted key path**: `server.services.flow_run_notifications`
+
+### `foreman`
+
+**Type**: [ServerServicesForemanSettings](#serverservicesforemansettings)
+
+**TOML dotted key path**: `server.services.foreman`
+
+### `late_runs`
+
+**Type**: [ServerServicesLateRunsSettings](#serverserviceslaterunssettings)
+
+**TOML dotted key path**: `server.services.late_runs`
+
+### `scheduler`
+
+**Type**: [ServerServicesSchedulerSettings](#serverservicesschedulersettings)
+
+**TOML dotted key path**: `server.services.scheduler`
+
+### `pause_expirations`
+
+**Type**: [ServerServicesPauseExpirationsSettings](#serverservicespauseexpirationssettings)
+
+**TOML dotted key path**: `server.services.pause_expirations`
+
+### `task_run_recorder`
+
+**Type**: [ServerServicesTaskRunRecorderSettings](#serverservicestaskrunrecordersettings)
+
+**TOML dotted key path**: `server.services.task_run_recorder`
+
+### `triggers`
+
+**Type**: [ServerServicesTriggersSettings](#serverservicestriggerssettings)
+
+**TOML dotted key path**: `server.services.triggers`
+
+---
+## ServerServicesTaskRunRecorderSettings
+Settings for controlling the task run recorder service
+### `enabled`
+Whether or not to start the task run recorder service in the server application.
+
+**Type**: `boolean`
+
+**Default**: `True`
+
+**TOML dotted key path**: `server.services.task_run_recorder.enabled`
+
+**Supported environment variables**:
+`PREFECT_SERVER_SERVICES_TASK_RUN_RECORDER_ENABLED`, `PREFECT_API_SERVICES_TASK_RUN_RECORDER_ENABLED`
+
+---
+## ServerServicesTriggersSettings
+Settings for controlling the triggers service
+### `enabled`
+Whether or not to start the triggers service in the server application.
+
+**Type**: `boolean`
+
+**Default**: `True`
+
+**TOML dotted key path**: `server.services.triggers.enabled`
+
+**Supported environment variables**:
+`PREFECT_SERVER_SERVICES_TRIGGERS_ENABLED`, `PREFECT_API_SERVICES_TRIGGERS_ENABLED`
+
+---
+## ServerSettings
+Settings for controlling server behavior
+### `logging_level`
+The default logging level for the Prefect API server.
+
+**Type**: `string`
+
+**Default**: `WARNING`
+
+**Constraints**:
+- Allowed values: 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'
+
+**TOML dotted key path**: `server.logging_level`
+
+**Supported environment variables**:
+`PREFECT_SERVER_LOGGING_LEVEL`, `PREFECT_LOGGING_SERVER_LEVEL`
+
+### `analytics_enabled`
+
+ When enabled, Prefect sends anonymous data (e.g. count of flow runs, package version)
+ on server startup to help us improve our product.
+
+
+**Type**: `boolean`
+
+**Default**: `True`
+
+**TOML dotted key path**: `server.analytics_enabled`
+
+**Supported environment variables**:
+`PREFECT_SERVER_ANALYTICS_ENABLED`
+
+### `metrics_enabled`
+Whether or not to enable Prometheus metrics in the API.
+
+**Type**: `boolean`
+
+**Default**: `False`
+
+**TOML dotted key path**: `server.metrics_enabled`
+
+**Supported environment variables**:
+`PREFECT_SERVER_METRICS_ENABLED`, `PREFECT_API_ENABLE_METRICS`
+
+### `log_retryable_errors`
+If `True`, log retryable errors in the API and its services.
+
+**Type**: `boolean`
+
+**Default**: `False`
+
+**TOML dotted key path**: `server.log_retryable_errors`
+
+**Supported environment variables**:
+`PREFECT_SERVER_LOG_RETRYABLE_ERRORS`, `PREFECT_API_LOG_RETRYABLE_ERRORS`
+
+### `register_blocks_on_start`
+If set, any block types that have been imported will be registered with the backend on application startup. If not set, block types must be manually registered.
+
+**Type**: `boolean`
+
+**Default**: `True`
+
+**TOML dotted key path**: `server.register_blocks_on_start`
+
+**Supported environment variables**:
+`PREFECT_SERVER_REGISTER_BLOCKS_ON_START`, `PREFECT_API_BLOCKS_REGISTER_ON_START`
+
+### `memoize_block_auto_registration`
+Controls whether or not block auto-registration on start up should be memoized.
+
+**Type**: `boolean`
+
+**Default**: `True`
+
+**TOML dotted key path**: `server.memoize_block_auto_registration`
+
+**Supported environment variables**:
+`PREFECT_SERVER_MEMOIZE_BLOCK_AUTO_REGISTRATION`, `PREFECT_MEMOIZE_BLOCK_AUTO_REGISTRATION`
+
+### `memo_store_path`
+The path to the memo store file.
+
+**Type**: `string | None`
+
+**Default**: `None`
+
+**TOML dotted key path**: `server.memo_store_path`
+
+**Supported environment variables**:
+`PREFECT_SERVER_MEMO_STORE_PATH`, `PREFECT_MEMO_STORE_PATH`
+
+### `deployment_schedule_max_scheduled_runs`
+The maximum number of scheduled runs to create for a deployment.
+
+**Type**: `integer`
+
+**Default**: `50`
+
+**TOML dotted key path**: `server.deployment_schedule_max_scheduled_runs`
+
+**Supported environment variables**:
+`PREFECT_SERVER_DEPLOYMENT_SCHEDULE_MAX_SCHEDULED_RUNS`, `PREFECT_DEPLOYMENT_SCHEDULE_MAX_SCHEDULED_RUNS`
+
+### `api`
+
+**Type**: [ServerAPISettings](#serverapisettings)
+
+**TOML dotted key path**: `server.api`
+
+### `database`
+
+**Type**: [ServerDatabaseSettings](#serverdatabasesettings)
+
+**TOML dotted key path**: `server.database`
+
+### `deployments`
+Settings for controlling server deployments behavior
+
+**Type**: [ServerDeploymentsSettings](#serverdeploymentssettings)
+
+**TOML dotted key path**: `server.deployments`
+
+### `ephemeral`
+
+**Type**: [ServerEphemeralSettings](#serverephemeralsettings)
+
+**TOML dotted key path**: `server.ephemeral`
+
+### `events`
+Settings for controlling server events behavior
+
+**Type**: [ServerEventsSettings](#servereventssettings)
+
+**TOML dotted key path**: `server.events`
+
+### `flow_run_graph`
+Settings for controlling flow run graph behavior
+
+**Type**: [ServerFlowRunGraphSettings](#serverflowrungraphsettings)
+
+**TOML dotted key path**: `server.flow_run_graph`
+
+### `services`
+Settings for controlling server services behavior
+
+**Type**: [ServerServicesSettings](#serverservicessettings)
+
+**TOML dotted key path**: `server.services`
+
+### `tasks`
+Settings for controlling server tasks behavior
+
+**Type**: [ServerTasksSettings](#servertaskssettings)
+
+**TOML dotted key path**: `server.tasks`
+
+### `ui`
+Settings for controlling server UI behavior
+
+**Type**: [ServerUISettings](#serveruisettings)
+
+**TOML dotted key path**: `server.ui`
+
+---
+## ServerTasksSchedulingSettings
+Settings for controlling server-side behavior related to task scheduling
+### `max_scheduled_queue_size`
+The maximum number of scheduled tasks to queue for submission.
+
+**Type**: `integer`
+
+**Default**: `1000`
+
+**TOML dotted key path**: `server.tasks.scheduling.max_scheduled_queue_size`
+
+**Supported environment variables**:
+`PREFECT_SERVER_TASKS_SCHEDULING_MAX_SCHEDULED_QUEUE_SIZE`, `PREFECT_TASK_SCHEDULING_MAX_SCHEDULED_QUEUE_SIZE`
+
+### `max_retry_queue_size`
+The maximum number of retries to queue for submission.
+
+**Type**: `integer`
+
+**Default**: `100`
+
+**TOML dotted key path**: `server.tasks.scheduling.max_retry_queue_size`
+
+**Supported environment variables**:
+`PREFECT_SERVER_TASKS_SCHEDULING_MAX_RETRY_QUEUE_SIZE`, `PREFECT_TASK_SCHEDULING_MAX_RETRY_QUEUE_SIZE`
+
+### `pending_task_timeout`
+How long before a PENDING task is made available to another task worker.
+
+**Type**: `string`
+
+**Default**: `PT0S`
+
+**TOML dotted key path**: `server.tasks.scheduling.pending_task_timeout`
+
+**Supported environment variables**:
+`PREFECT_SERVER_TASKS_SCHEDULING_PENDING_TASK_TIMEOUT`, `PREFECT_TASK_SCHEDULING_PENDING_TASK_TIMEOUT`
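+
+These settings govern background task runs executed by task workers. A
+minimal sketch of that pattern (hedged: `serve` blocks while listening for
+scheduled runs):
+
+```python
+from prefect import task
+from prefect.task_worker import serve
+
+
+@task
+def process(item: str) -> str:
+    return item.upper()
+
+
+if __name__ == "__main__":
+    # Calling `process.delay("data")` elsewhere schedules a run for a
+    # worker like this one; a PENDING run is offered to another worker
+    # after `pending_task_timeout` (an ISO 8601 duration, default PT0S).
+    serve(process)
+```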
+
+---
+## ServerTasksSettings
+Settings for controlling server-side behavior related to tasks
+### `tag_concurrency_slot_wait_seconds`
+The number of seconds to wait before retrying when a task run cannot secure a concurrency slot from the server.
+
+**Type**: `number`
+
+**Default**: `30`
+
+**Constraints**:
+- Minimum: 0.0
+
+**TOML dotted key path**: `server.tasks.tag_concurrency_slot_wait_seconds`
+
+**Supported environment variables**:
+`PREFECT_SERVER_TASKS_TAG_CONCURRENCY_SLOT_WAIT_SECONDS`, `PREFECT_TASK_RUN_TAG_CONCURRENCY_SLOT_WAIT_SECONDS`
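+
+This wait only applies to tasks carrying tags that have a server-side
+concurrency limit. A minimal sketch of such a task (the tag name `database`
+is illustrative):
+
+```python
+from prefect import task
+
+
+# Enters `Running` only when the `database` tag has a free concurrency
+# slot; otherwise the run waits `tag_concurrency_slot_wait_seconds`
+# before trying again.
+@task(tags=["database"])
+def query_table() -> None:
+    ...
+```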
+
+### `max_cache_key_length`
+The maximum number of characters allowed for a task run cache key.
+
+**Type**: `integer`
+
+**Default**: `2000`
+
+**TOML dotted key path**: `server.tasks.max_cache_key_length`
+
+**Supported environment variables**:
+`PREFECT_SERVER_TASKS_MAX_CACHE_KEY_LENGTH`, `PREFECT_API_TASK_CACHE_KEY_MAX_LENGTH`
+
+### `scheduling`
+
+**Type**: [ServerTasksSchedulingSettings](#servertasksschedulingsettings)
+
+**TOML dotted key path**: `server.tasks.scheduling`
+
+---
+## ServerUISettings
+### `enabled`
+Whether or not to serve the Prefect UI.
+
+**Type**: `boolean`
+
+**Default**: `True`
+
+**TOML dotted key path**: `server.ui.enabled`
+
+**Supported environment variables**:
+`PREFECT_SERVER_UI_ENABLED`, `PREFECT_UI_ENABLED`
+
+### `api_url`
+The connection URL for communication from the UI to the API. Defaults to `PREFECT_API_URL` if set. Otherwise, the default URL is generated from `PREFECT_SERVER_API_HOST` and `PREFECT_SERVER_API_PORT`.
+
+**Type**: `string | None`
+
+**Default**: `None`
+
+**TOML dotted key path**: `server.ui.api_url`
+
+**Supported environment variables**:
+`PREFECT_SERVER_UI_API_URL`, `PREFECT_UI_API_URL`
+
+### `serve_base`
+The base URL path to serve the Prefect UI from.
+
+**Type**: `string`
+
+**Default**: `/`
+
+**TOML dotted key path**: `server.ui.serve_base`
+
+**Supported environment variables**:
+`PREFECT_SERVER_UI_SERVE_BASE`, `PREFECT_UI_SERVE_BASE`
+
+### `static_directory`
+The directory to serve static files from. This should be used when running into permissions issues when attempting to serve the UI from the default directory (for example when running in a Docker container).
+
+**Type**: `string | None`
+
+**Default**: `None`
+
+**TOML dotted key path**: `server.ui.static_directory`
+
+**Supported environment variables**:
+`PREFECT_SERVER_UI_STATIC_DIRECTORY`, `PREFECT_UI_STATIC_DIRECTORY`
+
+---
+## TasksRunnerSettings
+### `thread_pool_max_workers`
+The maximum number of workers for ThreadPoolTaskRunner.
+
+**Type**: `integer | None`
+
+**Default**: `None`
+
+**TOML dotted key path**: `tasks.runner.thread_pool_max_workers`
+
+**Supported environment variables**:
+`PREFECT_TASKS_RUNNER_THREAD_POOL_MAX_WORKERS`, `PREFECT_TASK_RUNNER_THREAD_POOL_MAX_WORKERS`
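+
+A flow can also override this default by constructing the runner explicitly;
+a minimal sketch (the value `4` is illustrative):
+
+```python
+from prefect import flow, task
+from prefect.task_runners import ThreadPoolTaskRunner
+
+
+@task
+def double(n: int) -> int:
+    return n * 2
+
+
+# An explicit max_workers takes precedence over this setting's default.
+@flow(task_runner=ThreadPoolTaskRunner(max_workers=4))
+def my_flow() -> list[int]:
+    futures = [double.submit(n) for n in range(10)]
+    return [f.result() for f in futures]
+```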
+
+---
+## TasksSchedulingSettings
+### `default_storage_block`
+The `block-type/block-document` slug of a block to use as the default storage for autonomous tasks.
+
+**Type**: `string | None`
+
+**Default**: `None`
+
+**TOML dotted key path**: `tasks.scheduling.default_storage_block`
+
+**Supported environment variables**:
+`PREFECT_TASKS_SCHEDULING_DEFAULT_STORAGE_BLOCK`, `PREFECT_TASK_SCHEDULING_DEFAULT_STORAGE_BLOCK`
+
+### `delete_failed_submissions`
+Whether or not to delete failed task submissions from the database.
+
+**Type**: `boolean`
+
+**Default**: `True`
+
+**TOML dotted key path**: `tasks.scheduling.delete_failed_submissions`
+
+**Supported environment variables**:
+`PREFECT_TASKS_SCHEDULING_DELETE_FAILED_SUBMISSIONS`, `PREFECT_TASK_SCHEDULING_DELETE_FAILED_SUBMISSIONS`
+
+---
+## TasksSettings
+### `refresh_cache`
+If `True`, enables a refresh of cached results: re-executing the task will refresh the cached results.
+
+**Type**: `boolean`
+
+**Default**: `False`
+
+**TOML dotted key path**: `tasks.refresh_cache`
+
+**Supported environment variables**:
+`PREFECT_TASKS_REFRESH_CACHE`
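+
+Individual tasks can opt in or out of refreshing regardless of this default;
+a minimal sketch:
+
+```python
+from prefect import task
+from prefect.cache_policies import INPUTS
+
+
+# Recomputes and rewrites its cached result on every run.
+@task(cache_policy=INPUTS, refresh_cache=True)
+def always_refresh(x: int) -> int:
+    return x + 1
+
+
+# Never refreshes, even when PREFECT_TASKS_REFRESH_CACHE=true; an
+# existing cache key is read, not updated.
+@task(cache_policy=INPUTS, refresh_cache=False)
+def never_refresh(x: int) -> int:
+    return x - 1
+```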
+
+### `default_retries`
+This value sets the default number of retries for all tasks.
+
+**Type**: `integer`
+
+**Default**: `0`
+
+**Constraints**:
+- Minimum: 0
+
+**TOML dotted key path**: `tasks.default_retries`
+
+**Supported environment variables**:
+`PREFECT_TASKS_DEFAULT_RETRIES`, `PREFECT_TASK_DEFAULT_RETRIES`
+
+### `default_retry_delay_seconds`
+This value sets the default retry delay seconds for all tasks.
+
+**Type**: `integer | number | array`
+
+**Default**: `0`
+
+**TOML dotted key path**: `tasks.default_retry_delay_seconds`
+
+**Supported environment variables**:
+`PREFECT_TASKS_DEFAULT_RETRY_DELAY_SECONDS`, `PREFECT_TASK_DEFAULT_RETRY_DELAY_SECONDS`
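+
+Both defaults apply only when a task does not set its own values; a minimal
+sketch of per-task overrides (the numbers are illustrative):
+
+```python
+from prefect import task
+
+
+# Task-level values take precedence over `tasks.default_retries` and
+# `tasks.default_retry_delay_seconds`; a list gives per-attempt delays,
+# matching the `array` form of this setting.
+@task(retries=3, retry_delay_seconds=[1, 10, 100])
+def flaky_call() -> None:
+    ...
+```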
+
+### `default_persist_result`
+If `True`, results will be persisted by default for all tasks. Set to `False` to disable persistence by default. Note that setting to `False` will override the behavior set by a parent flow or task.
+
+**Type**: `boolean | None`
+
+**Default**: `None`
+
+**TOML dotted key path**: `tasks.default_persist_result`
+
+**Supported environment variables**:
+`PREFECT_TASKS_DEFAULT_PERSIST_RESULT`
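+
+A task-level value takes precedence over this default; a minimal sketch:
+
+```python
+from prefect import task
+
+
+# Overrides `tasks.default_persist_result` for this task only.
+@task(persist_result=True)
+def save_me() -> dict:
+    return {"ok": True}
+```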
+
+### `runner`
+Settings for controlling task runner behavior
+
+**Type**: [TasksRunnerSettings](#tasksrunnersettings)
+
+**TOML dotted key path**: `tasks.runner`
+
+### `scheduling`
+Settings for controlling client-side task scheduling behavior
+
+**Type**: [TasksSchedulingSettings](#tasksschedulingsettings)
+
+**TOML dotted key path**: `tasks.scheduling`
+
+---
+## TestingSettings
+### `test_mode`
+If `True`, places the API in test mode. This may modify behavior to facilitate testing.
+
+**Type**: `boolean`
+
+**Default**: `False`
+
+**TOML dotted key path**: `testing.test_mode`
+
+**Supported environment variables**:
+`PREFECT_TESTING_TEST_MODE`, `PREFECT_TEST_MODE`
+
+### `unit_test_mode`
+This setting only exists to facilitate unit testing. If `True`, code is executing in a unit test context. Defaults to `False`.
+
+**Type**: `boolean`
+
+**Default**: `False`
+
+**TOML dotted key path**: `testing.unit_test_mode`
+
+**Supported environment variables**:
+`PREFECT_TESTING_UNIT_TEST_MODE`, `PREFECT_UNIT_TEST_MODE`
+
+### `unit_test_loop_debug`
+If `True`, turns on debug mode for the unit testing event loop.
+
+**Type**: `boolean`
+
+**Default**: `True`
+
+**TOML dotted key path**: `testing.unit_test_loop_debug`
+
+**Supported environment variables**:
+`PREFECT_TESTING_UNIT_TEST_LOOP_DEBUG`, `PREFECT_UNIT_TEST_LOOP_DEBUG`
+
+### `test_setting`
+This setting only exists to facilitate unit testing. If in test mode, this setting will return its value. Otherwise, it returns `None`.
+
+**Type**: `None`
+
+**Default**: `FOO`
+
+**TOML dotted key path**: `testing.test_setting`
+
+**Supported environment variables**:
+`PREFECT_TESTING_TEST_SETTING`, `PREFECT_TEST_SETTING`
+
+---
+## WorkerSettings
+### `heartbeat_seconds`
+Number of seconds a worker should wait between sending heartbeats.
+
+**Type**: `number`
+
+**Default**: `30`
+
+**TOML dotted key path**: `worker.heartbeat_seconds`
+
+**Supported environment variables**:
+`PREFECT_WORKER_HEARTBEAT_SECONDS`
+
+### `query_seconds`
+Number of seconds a worker should wait between queries for scheduled work.
+
+**Type**: `number`
+
+**Default**: `10`
+
+**TOML dotted key path**: `worker.query_seconds`
+
+**Supported environment variables**:
+`PREFECT_WORKER_QUERY_SECONDS`
+
+### `prefetch_seconds`
+The number of seconds into the future a worker should query for scheduled work.
+
+**Type**: `number`
+
+**Default**: `10`
+
+**TOML dotted key path**: `worker.prefetch_seconds`
+
+**Supported environment variables**:
+`PREFECT_WORKER_PREFETCH_SECONDS`
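+
+Together these three values define a worker's polling loop: it heartbeats
+every `heartbeat_seconds`, polls every `query_seconds`, and picks up runs
+scheduled up to `prefetch_seconds` in the future. A minimal sketch of
+overriding them through environment variables (assuming they are read when
+the worker process starts):
+
+```python
+import os
+
+# Poll less often, but look further ahead for scheduled work.
+os.environ["PREFECT_WORKER_QUERY_SECONDS"] = "30"
+os.environ["PREFECT_WORKER_PREFETCH_SECONDS"] = "60"
+```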
+
+### `webserver`
+Settings for a worker's webserver
+
+**Type**: [WorkerWebserverSettings](#workerwebserversettings)
+
+**TOML dotted key path**: `worker.webserver`
+
+---
+## WorkerWebserverSettings
+### `host`
+The host address the worker's webserver should bind to.
+
+**Type**: `string`
+
+**Default**: `0.0.0.0`
+
+**TOML dotted key path**: `worker.webserver.host`
+
+**Supported environment variables**:
+`PREFECT_WORKER_WEBSERVER_HOST`
+
+### `port`
+The port the worker's webserver should bind to.
+
+**Type**: `integer`
+
+**Default**: `8080`
+
+**TOML dotted key path**: `worker.webserver.port`
+
+**Supported environment variables**:
+`PREFECT_WORKER_WEBSERVER_PORT`
+
+---
\ No newline at end of file
diff --git a/docs/3.0/develop/task-caching.mdx b/docs/v3/develop/task-caching.mdx
similarity index 96%
rename from docs/3.0/develop/task-caching.mdx
rename to docs/v3/develop/task-caching.mdx
index a163c3691c17..bf2d23be413b 100644
--- a/docs/3.0/develop/task-caching.mdx
+++ b/docs/v3/develop/task-caching.mdx
@@ -5,7 +5,7 @@ description: Learn how to use caching to gain efficiency and pipeline idempotenc
Caching refers to the ability of a task run to enter a `Completed` state and return a predetermined
value without actually running the code that defines the task.
-Caching allows you to efficiently reuse [results of tasks](/3.0/develop/results/) that may be expensive to compute
+Caching allows you to efficiently reuse [results of tasks](/v3/develop/results/) that may be expensive to compute
and ensure that your pipelines are idempotent when retrying them due to unexpected failure.
By default Prefect's caching logic is based on the following attributes of a task invocation:
@@ -16,7 +16,7 @@ By default Prefect's caching logic is based on the following attributes of a tas
These values are hashed to compute the task's _cache key_.
This implies that, by default, calling the same task with the same inputs more than once within a flow
will result in cached behavior for all calls after the first.
-This behavior can be configured - see [customizing the cache](/3.0/develop/write-tasks#customizing-the-cache) below.
+This behavior can be configured - see [customizing the cache](/v3/develop/write-tasks#customizing-the-cache) below.
**Caching requires result persistence**
@@ -28,8 +28,8 @@ To turn on result persistence for all of your tasks use the `PREFECT_RESULTS_PER
prefect config set PREFECT_RESULTS_PERSIST_BY_DEFAULT=true
```
-See [managing results](/3.0/develop/results/) for more details on managing your result configuration, and
-[settings](/3.0/manage/settings-and-profiles) for more details on managing Prefect settings.
+See [managing results](/v3/develop/results/) for more details on managing your result configuration, and
+[settings](/v3/develop/settings-and-profiles) for more details on managing Prefect settings.
## Cache keys
@@ -364,7 +364,7 @@ def my_cached_task(x: int):
There are many situations in which multiple tasks need to always run together or not at all.
This can be achieved in Prefect by configuring these tasks to always write to their caches within
-a single [_transaction_](/3.0/develop/transactions).
+a single [_transaction_](/v3/develop/transactions).
```python
from prefect import task, flow
@@ -399,7 +399,7 @@ the `process_data` task succeeds as well.
This ensures that anytime you need to rerun this flow both `load_data` and `process_data` are executed
together.
After a successful execution both tasks will be cached until the cache key is updated.
-Read more about [transactions](/3.0/develop/transactions).
+Read more about [transactions](/v3/develop/transactions).
## Caching example
@@ -473,7 +473,7 @@ particularly useful when you have a flow that is responsible for updating the ca
To refresh the cache for all tasks, use the `PREFECT_TASKS_REFRESH_CACHE` setting.
Setting `PREFECT_TASKS_REFRESH_CACHE=true` changes the default behavior of all tasks to refresh.
This is particularly useful to rerun a flow without cached results.
-See [settings](/3.0/manage/settings-and-profiles) for more details on managing Prefect settings.
+See [settings](/v3/develop/settings-and-profiles) for more details on managing Prefect settings.
If you have tasks that should not refresh when this setting is enabled, you may explicitly set `refresh_cache`
to `False`. These tasks will never refresh the cache. If a cache key exists it will be read, not updated.
diff --git a/docs/3.0/develop/task-run-limits.mdx b/docs/v3/develop/task-run-limits.mdx
similarity index 91%
rename from docs/3.0/develop/task-run-limits.mdx
rename to docs/v3/develop/task-run-limits.mdx
index 5adbbb7ef6ff..3bb97b63ca16 100644
--- a/docs/3.0/develop/task-run-limits.mdx
+++ b/docs/v3/develop/task-run-limits.mdx
@@ -10,7 +10,7 @@ Task run concurrency limits use [task tags](#tags). You can specify an optional
task runs in a `Running` state for tasks with a given tag.
-Tag-based task concurrency is different from [Global concurrency limits](/3.0/develop/global-concurrency-limits), though they can be used to achieve similar outcomes. Global concurrency limits are a more general way to control concurrency for any Python-based operation, whereas tag-based concurrency limits are specific to Prefect tasks.
+Tag-based task concurrency is different from [Global concurrency limits](/v3/develop/global-concurrency-limits), though they can be used to achieve similar outcomes. Global concurrency limits are a more general way to control concurrency for any Python-based operation, whereas tag-based concurrency limits are specific to Prefect tasks.
If a task has multiple tags, it will run only if **_all_** tags have available concurrency.
@@ -19,7 +19,7 @@ Tags without specified concurrency limits are treated as unlimited. Setting a ta
### Execution behavior
-Task tag limits are checked whenever a task run attempts to enter a [`Running` state](/3.0/develop/manage-states/).
+Task tag limits are checked whenever a task run attempts to enter a [`Running` state](/v3/develop/manage-states/).
If there are no concurrency slots available for any one of your task's tags, it delays the transition to a `Running` state
and instructs the client to try entering a `Running` state again in 30 seconds
@@ -30,7 +30,7 @@ and instructs the client to try entering a `Running` state again in 30 seconds
**Flow run concurrency limits are set at a work pool, work queue, or deployment level**
-While task run concurrency limits are configured through tags (as shown below), [flow run concurrency limits](/3.0/develop/global-concurrency-limits) are configured through work pools and/or work queues.
+While task run concurrency limits are configured through tags (as shown below), [flow run concurrency limits](/v3/develop/global-concurrency-limits) are configured through work pools and/or work queues.
You can set concurrency limits on as few or as many tags as you wish. You can set limits through:
diff --git a/docs/3.0/develop/task-runners.mdx b/docs/v3/develop/task-runners.mdx
similarity index 98%
rename from docs/3.0/develop/task-runners.mdx
rename to docs/v3/develop/task-runners.mdx
index c1b33f16ca66..5c27e814dd67 100644
--- a/docs/3.0/develop/task-runners.mdx
+++ b/docs/v3/develop/task-runners.mdx
@@ -291,7 +291,7 @@ assert resulting_sum == [10, 11, 12]
## Use multiple task runners
Each flow can only have one task runner, but sometimes you may want a subset of your tasks to run using a different task runner than the one configured on the flow.
-In this case, you can create [nested flows](/3.0/develop/write-flows/#composing-flows) for tasks that need to use a different task runner.
+In this case, you can create [nested flows](/v3/develop/write-flows/#composing-flows) for tasks that need to use a different task runner.
For example, you can have a flow (in the example below called `multiple_runner_flow`) that runs its tasks locally using the `ThreadPoolTaskRunner`.
If you have some tasks that can run more efficiently in parallel on a Dask cluster, you can create a nested flow (such as `dask_nested_flow`) to run those tasks using the `DaskTaskRunner`.
diff --git a/docs/3.0/develop/test-workflows.mdx b/docs/v3/develop/test-workflows.mdx
similarity index 100%
rename from docs/3.0/develop/test-workflows.mdx
rename to docs/v3/develop/test-workflows.mdx
diff --git a/docs/3.0/develop/transactions.mdx b/docs/v3/develop/transactions.mdx
similarity index 99%
rename from docs/3.0/develop/transactions.mdx
rename to docs/v3/develop/transactions.mdx
index 00d30c144e65..3948755ed8fb 100644
--- a/docs/3.0/develop/transactions.mdx
+++ b/docs/v3/develop/transactions.mdx
@@ -13,7 +13,7 @@ These records can be shared across tasks and flows.
Under the hood, every Prefect task run is governed by a transaction.
In the default mode of task execution, all you need to understand about transactions are [the policies
-determining the task's cache key computation](/3.0/develop/task-caching).
+determining the task's cache key computation](/v3/develop/task-caching).
**Transactions and states**
diff --git a/docs/3.0/develop/variables.mdx b/docs/v3/develop/variables.mdx
similarity index 100%
rename from docs/3.0/develop/variables.mdx
rename to docs/v3/develop/variables.mdx
diff --git a/docs/3.0/develop/write-flows.mdx b/docs/v3/develop/write-flows.mdx
similarity index 93%
rename from docs/3.0/develop/write-flows.mdx
rename to docs/v3/develop/write-flows.mdx
index d61574683d8b..123e75cb66eb 100644
--- a/docs/3.0/develop/write-flows.mdx
+++ b/docs/v3/develop/write-flows.mdx
@@ -25,12 +25,12 @@ if __name__ == "__main__":
When a function becomes a flow, it gains the following capabilities:
- Metadata about [flow runs](#flow-runs), such as run time and final state, is automatically tracked.
-- Each [state](/3.0/develop/manage-states/) the flow enters is recorded. This
-allows you to observe and [act upon each transition](/3.0/develop/manage-states#execute-code-on-state-changes) in the flow's execution.
+- Each [state](/v3/develop/manage-states/) the flow enters is recorded. This
+allows you to observe and [act upon each transition](/v3/develop/manage-states#execute-code-on-state-changes) in the flow's execution.
- Input arguments can be type validated as workflow [parameters](/#specify-flow-parameters).
- [Retries](#retries) can be performed on failure, with configurable delay and retry limits.
- Timeouts can be enforced to prevent unintentional, long-running workflows.
-- A flow can be [deployed](/3.0/deploy/infrastructure-examples/docker/), which exposes an API for interacting with it remotely.
+- A flow can be [deployed](/v3/deploy/infrastructure-examples/docker/), which exposes an API for interacting with it remotely.
Flows are uniquely identified by name.
You can provide a `name` parameter value for the flow:
@@ -54,11 +54,11 @@ Create a flow run by calling a flow by its function name, just as you would a no
You can also create a flow run by:
- Using external schedulers such as `cron` to invoke a flow function
-- Triggering a [deployment](/3.0/deploy/infrastructure-examples/docker/) of that flow in Prefect Cloud or self-hosted Prefect server
+- Triggering a [deployment](/v3/deploy/infrastructure-examples/docker/) of that flow in Prefect Cloud or self-hosted Prefect server
- Starting a flow run for the deployment through a schedule, the Prefect UI, or the Prefect API
However you run your flow, Prefect monitors the flow run, capturing its state for observability.
-You can log a [variety of metadata](/3.0/develop/logging) about flow runs for monitoring, troubleshooting, and auditing purposes.
+You can log a [variety of metadata](/v3/develop/logging) about flow runs for monitoring, troubleshooting, and auditing purposes.
The example below uses the HTTPX client library to fetch statistics about the [main Prefect repository](https://github.com/PrefectHQ/prefect).
@@ -95,7 +95,7 @@ Forks 🍴 : 1245
## Specify flow parameters
As with any Python function, you can pass arguments to a flow, including both positional and keyword arguments.
-These arguments defined on your flow function are called [parameters](/3.0/develop/write-flows/#parameters).
+These arguments defined on your flow function are called [parameters](/v3/develop/write-flows/#parameters).
They are stored by the Prefect orchestration engine on the flow run object.
Prefect automatically performs type conversion of inputs using any provided type hints.
@@ -158,7 +158,7 @@ Flow run received invalid parameters:
- model.a: Input should be a valid integer, unable to parse string as an integer
```
-Note that you can provide parameter values to a flow through the API using a [deployment](/3.0/deploy/).
+Note that you can provide parameter values to a flow through the API using a [deployment](/v3/deploy/).
Flow run parameters sent to the API are coerced to the appropriate types when possible.
@@ -177,7 +177,7 @@ Flow run parameters cannot exceed `512kb` in size.
## Compose flows
-Flows can call [tasks](/3.0/develop/write-tasks), the most granular units of orchestrated work in Prefect workflows:
+Flows can call [tasks](/v3/develop/write-tasks), the most granular units of orchestrated work in Prefect workflows:
```python
from prefect import flow, task
@@ -212,7 +212,7 @@ There is a full representation of the nested flow run in the backend as if it ha
Nested flow runs differ from normal flow runs in that they resolve any passed task futures into data.
This allows data to be passed from the parent flow run to a nested flow run easily.
-When a nested flow run starts, it creates a new [task runner](/3.0/develop/task-runners/) for any tasks it contains.
+When a nested flow run starts, it creates a new [task runner](/v3/develop/task-runners/) for any tasks it contains.
When the nested flow run completes, the task runner shuts down.
Nested flow runs block execution of the parent flow run until completion.
However, asynchronous nested flows can run concurrently with [AnyIO task groups](https://anyio.readthedocs.io/en/stable/tasks.html) or [asyncio.gather](https://docs.python.org/3/library/asyncio-task.html#id6).
@@ -222,7 +222,7 @@ The `state_details` field of the task run representing the child flow run includ
The `state_details` field of the nested flow run includes a `parent_task_run_id`.
You can define multiple flows within the same file.
-Whether running locally or through a [deployment](/3.0/deploy/infrastructure-examples/docker/), you must indicate which flow is the entrypoint for a flow run.
+Whether running locally or through a [deployment](/v3/deploy/infrastructure-examples/docker/), you must indicate which flow is the entrypoint for a flow run.
**Cancel nested flow runs**
@@ -448,7 +448,7 @@ list(generator()) # prints 'Generator consumed!'
A _flow run_ is a single execution of a flow.
You can create a flow run by calling the flow function manually, or even by using an external scheduler such as `cron` to invoke a flow function.
-Most users run flows by creating a [deployment](/3.0/deploy/) on Prefect Cloud or Prefect server and then scheduling a flow run for the deployment through a schedule, the Prefect UI, or the Prefect API.
+Most users run flows by creating a [deployment](/v3/deploy/) on Prefect Cloud or Prefect server and then scheduling a flow run for the deployment through a schedule, the Prefect UI, or the Prefect API.
However you run a flow, the Prefect API monitors the flow run and records information for monitoring, troubleshooting, and auditing.
@@ -463,7 +463,7 @@ All flows can be configured by passing arguments to the decorator. Flows accept
| `retries` | An optional number of times to retry on flow run failure. |
| `retry_delay_seconds` | An optional number of seconds to wait before retrying the flow after failure. This is only applicable if `retries` is nonzero. |
| `flow_run_name` | An optional name to distinguish runs of this flow; this name can be provided as a string template with the flow's parameters as variables; you can also provide this name as a function that returns a string. |
-| `task_runner` | An optional [task runner](/3.0/develop/task-runners/) to use for task execution within the flow when you `.submit()` tasks. If not provided and you `.submit()` tasks, the `ThreadPoolTaskRunner` is used. |
+| `task_runner` | An optional [task runner](/v3/develop/task-runners/) to use for task execution within the flow when you `.submit()` tasks. If not provided and you `.submit()` tasks, the `ThreadPoolTaskRunner` is used. |
| `timeout_seconds` | An optional number of seconds indicating a maximum runtime for the flow. If the flow exceeds this runtime, it is marked as failed. Flow execution may continue until the next task is called. |
| `validate_parameters` | Boolean indicating whether parameters passed to flows are validated by Pydantic. Default is `True`. |
| `version` | An optional version string for the flow. If not provided, we will attempt to create a version string as a hash of the file containing the wrapped function. If the file cannot be located, the version will be null. |
@@ -562,7 +562,7 @@ If set to `False`, no validation is performed on flow parameters.
## Final state determination
A state is a record of the status of a particular task run or flow run.
-See the [manage states](/3.0/develop/manage-states) page for more information.
+See the [manage states](/v3/develop/manage-states) page for more information.
import FinalFlowState from '/snippets/final-flow-state.mdx'
@@ -750,9 +750,9 @@ If the flow run fails on the final retry, Prefect records the final flow run sta
Optionally, pass an integer to `retry_delay_seconds` to specify how many seconds to wait between each retry attempt.
-Check out [Transactions](/3.0/develop/transactions/) to make your flows even more resilient and rollback actions when desired.
+Check out [Transactions](/v3/develop/transactions/) to make your flows even more resilient and rollback actions when desired.
## See also
-- Store and reuse non-sensitive bits of data, such as configuration information, by using [variables](/3.0/develop/variables).
-- Make your flow more manageable, performant, and observable by breaking it into discrete units of orchestrated work with [tasks](/3.0/develop/write-tasks/).
+- Store and reuse non-sensitive bits of data, such as configuration information, by using [variables](/v3/develop/variables).
+- Make your flow more manageable, performant, and observable by breaking it into discrete units of orchestrated work with [tasks](/v3/develop/write-tasks/).
diff --git a/docs/3.0/develop/write-tasks.mdx b/docs/v3/develop/write-tasks.mdx
similarity index 98%
rename from docs/3.0/develop/write-tasks.mdx
rename to docs/v3/develop/write-tasks.mdx
index 71ab464c1954..22510ce51421 100644
--- a/docs/3.0/develop/write-tasks.mdx
+++ b/docs/v3/develop/write-tasks.mdx
@@ -11,7 +11,7 @@ Tasks can:
- Cache their execution across invocations
- Encapsulate workflow logic into reusable units across flows
- Receive metadata about upstream task dependencies and their state before running
-- Use automatic [logging](/3.0/develop/logging/) to capture runtime details, tags,
+- Use automatic [logging](/v3/develop/logging/) to capture runtime details, tags,
and final state
- Execute concurrently
- Be defined in the same file as the flow or imported from modules
@@ -20,8 +20,8 @@ and final state
Flows and tasks share some common features:
- They can be defined using their respective decorator, which accepts configuration settings
-(see all [task settings](/3.0/develop/write-tasks/#task-configuration) and
-[flow settings](/3.0/develop/write-flows/#flow-settings))
+(see all [task settings](/v3/develop/write-tasks/#task-configuration) and
+[flow settings](/v3/develop/write-flows/#flow-settings))
- They can have a name, description, and tags for organization and bookkeeping
- They provide retries, timeouts, and other hooks to handle failure and completion events
@@ -91,7 +91,7 @@ Almost any standard Python function can be turned into a Prefect task by adding
Prefect uses client-side task run orchestration by default, which significantly improves performance, especially for workflows with many tasks. Task creation and state updates happen locally, reducing API calls to the Prefect server during execution. This enables efficient handling of large-scale workflows and improves reliability when server connectivity is intermittent.
-Tasks are always executed in the main thread by default, unless a specific [task runner](/3.0/develop/task-runners) is used to execute them on different threads, processes, or infrastructure. This facilitates native Python debugging and profiling.
+Tasks are always executed in the main thread by default, unless a specific [task runner](/v3/develop/task-runners) is used to execute them on different threads, processes, or infrastructure. This facilitates native Python debugging and profiling.
Task updates are logged in batch, leading to eventual consistency for task states in the UI and API queries. While this means there may be a slight delay in seeing the most up-to-date task states, it allows for substantial performance improvements and increased workflow scale.
@@ -454,7 +454,7 @@ def my_flow(name: str):
Tags are optional string labels that enable you to identify and group tasks other than by name or flow.
Tags are useful to:
-- Filter task runs by tag in the UI and through the [Prefect REST API](/3.0/api-ref/rest-api/#filtering).
+- Filter task runs by tag in the UI and through the [Prefect REST API](/v3/api-ref/rest-api/#filtering).
- Set [concurrency limits](#task-run-concurrency-limits) on task runs by tag.
You may specify tags as a keyword argument on the [task decorator](https://prefect-python-sdk-docs.netlify.app/prefect/tasks/#prefect.tasks.task).
diff --git a/docs/3.0/get-started/index.mdx b/docs/v3/get-started/index.mdx
similarity index 74%
rename from docs/3.0/get-started/index.mdx
rename to docs/v3/get-started/index.mdx
index eb060dffe1d2..174f780d133d 100644
--- a/docs/3.0/get-started/index.mdx
+++ b/docs/v3/get-started/index.mdx
@@ -24,13 +24,13 @@ if __name__ == "__main__":
## Get started
-
+
Learn how to schedule a script to run on remote infrastructure and observe its state.
-
+
Supercharge Prefect with enhanced governance, security, and performance capabilities.
-
+
Upgrade from Prefect 2 to Prefect 3 to get the latest features and performance enhancements.
@@ -38,13 +38,13 @@ if __name__ == "__main__":
## Start building
-
+
Write, run, configure, and observe workflows and their tasks, results, artifacts, and more.
-
+
Run workflows in local processes or deploy them to dynamically provisioned infrastructure.
-
+
Enable workflows to react to their environment with events, automations, and webhooks.
diff --git a/docs/3.0/get-started/install.mdx b/docs/v3/get-started/install.mdx
similarity index 86%
rename from docs/3.0/get-started/install.mdx
rename to docs/v3/get-started/install.mdx
index 2cf8b8c52440..a9aba320feb4 100644
--- a/docs/3.0/get-started/install.mdx
+++ b/docs/v3/get-started/install.mdx
@@ -59,10 +59,10 @@ pip install -U prefect-client
You also need an API server, either:
-* [Prefect Cloud](/3.0/manage/cloud/), a managed solution that provides strong scaling, performance, and security, or
-* [Self-hosted Prefect server](/3.0/manage/self-host/), an API server that you run on your own infrastructure where you are responsible for scaling and any authentication and authorization.
+* [Prefect Cloud](/v3/manage/cloud/), a managed solution that provides strong scaling, performance, and security, or
+* [Self-hosted Prefect server](/v3/manage/self-host/), an API server that you run on your own infrastructure where you are responsible for scaling and any authentication and authorization.
-Now that you have Prefect installed, go through the [quickstart](/3.0/get-started/quickstart/) to try it out.
+Now that you have Prefect installed, go through the [quickstart](/v3/get-started/quickstart/) to try it out.
Learn more about release versions in the [Prefect release notes](https://github.com/PrefectHQ/prefect/releases).
diff --git a/docs/v3/get-started/quickstart.mdx b/docs/v3/get-started/quickstart.mdx
new file mode 100644
index 000000000000..5a4ae499a924
--- /dev/null
+++ b/docs/v3/get-started/quickstart.mdx
@@ -0,0 +1,176 @@
+---
+title: Quickstart
+description: Get started with Prefect, the easiest way to orchestrate and observe your data pipelines
+---
+
+import Installation from '/snippets/installation.mdx'
+
+Prefect is an orchestration and observability platform that empowers developers to build and scale workflows quickly.
+In this quickstart, you will use Prefect to convert the following Python script to a deployable workflow.
+
+```python my_script.py
+import httpx
+
+
+def show_stars(github_repos: list[str]):
+ """Show the number of stars that GitHub repos have"""
+
+ for repo in github_repos:
+ repo_stats = fetch_stats(repo)
+ stars = get_stars(repo_stats)
+ print(f"{repo}: {stars} stars")
+
+
+def fetch_stats(github_repo: str):
+ """Fetch the statistics for a GitHub repo"""
+
+ return httpx.get(f"https://api.github.com/repos/{github_repo}").json()
+
+
+def get_stars(repo_stats: dict):
+ """Get the number of stars from GitHub repo statistics"""
+
+ return repo_stats['stargazers_count']
+
+
+if __name__ == "__main__":
+ show_stars([
+ "PrefectHQ/prefect",
+ "pydantic/pydantic",
+ "huggingface/transformers"
+ ])
+```
+
+## Install Prefect
+
+
+
+
+See [Install Prefect](/v3/get-started/install/) for more details on installation.
+
+
+## Connect to a Prefect API
+
+Connect to a Prefect API:
+
+
+
+1. Start a local API server:
+
+ ```bash
+ prefect server start
+ ```
+
+1. Open the Prefect dashboard in your browser at [http://localhost:4200](http://localhost:4200).
+
+
+1. Head to [https://app.prefect.cloud/](https://app.prefect.cloud/) and sign in or create a free Prefect Cloud account.
+1. Log in to Prefect Cloud from your development environment:
+
+ ```bash
+ prefect cloud login
+ ```
+
+1. Choose **Log in with a web browser** and click the **Authorize** button in the browser window that opens.
+
+Your CLI is now authenticated with your Prefect Cloud account through a locally stored API key that expires in 30 days.
+
+If you have any issues with browser-based authentication, you can [authenticate with a manually created API key](/v3/manage/cloud/manage-users/api-keys/) instead.
+
+
+
+## Convert your script to a Prefect workflow
+
+Decorators are the easiest way to convert a Python script into a workflow.
+
+1. Add a `@flow` decorator to the script's entrypoint.
+1. Add `@task` decorators to any functions called by the flow.
+
+This will create a [flow](/v3/develop/write-flows/) and corresponding [tasks](/v3/develop/write-tasks/).
+Tasks receive metadata about upstream dependencies and the state of those dependencies before they run.
+Prefect records these dependencies and states as it orchestrates these tasks.
+
+```python my_workflow.py
+import httpx
+
+from prefect import flow, task # Prefect flow and task decorators
+
+
+@flow(log_prints=True)
+def show_stars(github_repos: list[str]):
+ """Flow: Show the number of stars that GitHub repos have"""
+
+ for repo in github_repos:
+ # Call Task 1
+ repo_stats = fetch_stats(repo)
+
+ # Call Task 2
+ stars = get_stars(repo_stats)
+
+ # Print the result
+ print(f"{repo}: {stars} stars")
+
+
+@task
+def fetch_stats(github_repo: str):
+ """Task 1: Fetch the statistics for a GitHub repo"""
+
+ return httpx.get(f"https://api.github.com/repos/{github_repo}").json()
+
+
+@task
+def get_stars(repo_stats: dict):
+ """Task 2: Get the number of stars from GitHub repo statistics"""
+
+ return repo_stats['stargazers_count']
+
+
+# Run the flow
+if __name__ == "__main__":
+ show_stars([
+ "PrefectHQ/prefect",
+ "pydantic/pydantic",
+ "huggingface/transformers"
+ ])
+```
+
+
+The `log_prints=True` argument provided to the `@flow` decorator automatically converts any `print` statements within the function to `INFO` level logs.
+
+
+## Run your flow
+
+You can run your Prefect flow just as you would a Python script:
+
+```bash
+python my_workflow.py
+```
+
+The output in your terminal should look similar to this:
+
+```bash
+08:21:31.335 | INFO | prefect.engine - Created flow run 'attentive-kestrel' for flow 'show-stars'
+08:21:31.336 | INFO | prefect.engine - View at http://127.0.0.1:4200/runs/flow-run/edf6866f-371d-4e51-a9e3-556a525b1146
+08:21:31.731 | INFO | Task run 'fetch_stats-dce' - Finished in state Completed()
+08:21:31.775 | INFO | Task run 'get_stars-585' - Finished in state Completed()
+08:21:31.776 | INFO | Flow run 'attentive-kestrel' - PrefectHQ/prefect: 17318 stars
+08:21:32.089 | INFO | Task run 'fetch_stats-e16' - Finished in state Completed()
+08:21:32.118 | INFO | Task run 'get_stars-756' - Finished in state Completed()
+08:21:32.119 | INFO | Flow run 'attentive-kestrel' - pydantic/pydantic: 186318 stars
+08:21:32.409 | INFO | Task run 'fetch_stats-b62' - Finished in state Completed()
+08:21:32.440 | INFO | Task run 'get_stars-8ad' - Finished in state Completed()
+08:21:32.441 | INFO | Flow run 'attentive-kestrel' - huggingface/transformers: 134848 stars
+08:21:32.469 | INFO | Flow run 'attentive-kestrel' - Finished in state Completed()
+```
+
+Prefect automatically tracks the state of the flow run and logs the output, which can be viewed directly in the terminal or in the UI.
+
+## Next steps
+
+In this quickstart, you converted a Python script to a deployable workflow tracked by Prefect.
+
+Next, get this workflow off of your laptop and [run it automatically on a schedule](/v3/tutorials/schedule).
+
+
+Need help? [Book a meeting](https://calendly.com/prefect-experts/prefect-product-advocates?utm_campaign=prefect_docs_cloud&utm_content=prefect_docs&utm_medium=docs&utm_source=docs) with a Prefect Product Advocate to get your questions answered.
+
diff --git a/docs/3.0/img/guides/automatic-task-dependencies.png b/docs/v3/img/guides/automatic-task-dependencies.png
similarity index 100%
rename from docs/3.0/img/guides/automatic-task-dependencies.png
rename to docs/v3/img/guides/automatic-task-dependencies.png
diff --git a/docs/3.0/img/guides/automation-custom.png b/docs/v3/img/guides/automation-custom.png
similarity index 100%
rename from docs/3.0/img/guides/automation-custom.png
rename to docs/v3/img/guides/automation-custom.png
diff --git a/docs/3.0/img/guides/automation-list.png b/docs/v3/img/guides/automation-list.png
similarity index 100%
rename from docs/3.0/img/guides/automation-list.png
rename to docs/v3/img/guides/automation-list.png
diff --git a/docs/3.0/img/guides/automation-triggers.png b/docs/v3/img/guides/automation-triggers.png
similarity index 100%
rename from docs/3.0/img/guides/automation-triggers.png
rename to docs/v3/img/guides/automation-triggers.png
diff --git a/docs/3.0/img/guides/block-list.png b/docs/v3/img/guides/block-list.png
similarity index 100%
rename from docs/3.0/img/guides/block-list.png
rename to docs/v3/img/guides/block-list.png
diff --git a/docs/3.0/img/guides/final-automation.png b/docs/v3/img/guides/final-automation.png
similarity index 100%
rename from docs/3.0/img/guides/final-automation.png
rename to docs/v3/img/guides/final-automation.png
diff --git a/docs/3.0/img/guides/flow-of-deployments.png b/docs/v3/img/guides/flow-of-deployments.png
similarity index 100%
rename from docs/3.0/img/guides/flow-of-deployments.png
rename to docs/v3/img/guides/flow-of-deployments.png
diff --git a/docs/3.0/img/guides/gcp-creds-block-setup.png b/docs/v3/img/guides/gcp-creds-block-setup.png
similarity index 100%
rename from docs/3.0/img/guides/gcp-creds-block-setup.png
rename to docs/v3/img/guides/gcp-creds-block-setup.png
diff --git a/docs/3.0/img/guides/gcr-service-account-setup.png b/docs/v3/img/guides/gcr-service-account-setup.png
similarity index 100%
rename from docs/3.0/img/guides/gcr-service-account-setup.png
rename to docs/v3/img/guides/gcr-service-account-setup.png
diff --git a/docs/3.0/img/guides/github-actions-trigger.png b/docs/v3/img/guides/github-actions-trigger.png
similarity index 100%
rename from docs/3.0/img/guides/github-actions-trigger.png
rename to docs/v3/img/guides/github-actions-trigger.png
diff --git a/docs/3.0/img/guides/github-secrets.png b/docs/v3/img/guides/github-secrets.png
similarity index 100%
rename from docs/3.0/img/guides/github-secrets.png
rename to docs/v3/img/guides/github-secrets.png
diff --git a/docs/3.0/img/guides/job-variables.png b/docs/v3/img/guides/job-variables.png
similarity index 100%
rename from docs/3.0/img/guides/job-variables.png
rename to docs/v3/img/guides/job-variables.png
diff --git a/docs/3.0/img/guides/manual-task-dependencies.png b/docs/v3/img/guides/manual-task-dependencies.png
similarity index 100%
rename from docs/3.0/img/guides/manual-task-dependencies.png
rename to docs/v3/img/guides/manual-task-dependencies.png
diff --git a/docs/3.0/img/guides/notification-block.png b/docs/v3/img/guides/notification-block.png
similarity index 100%
rename from docs/3.0/img/guides/notification-block.png
rename to docs/v3/img/guides/notification-block.png
diff --git a/docs/3.0/img/guides/notify-auto-block.png b/docs/v3/img/guides/notify-auto-block.png
similarity index 100%
rename from docs/3.0/img/guides/notify-auto-block.png
rename to docs/v3/img/guides/notify-auto-block.png
diff --git a/docs/3.0/img/guides/push-flow-running.png b/docs/v3/img/guides/push-flow-running.png
similarity index 100%
rename from docs/3.0/img/guides/push-flow-running.png
rename to docs/v3/img/guides/push-flow-running.png
diff --git a/docs/3.0/img/guides/webhook-automate.png b/docs/v3/img/guides/webhook-automate.png
similarity index 100%
rename from docs/3.0/img/guides/webhook-automate.png
rename to docs/v3/img/guides/webhook-automate.png
diff --git a/docs/3.0/img/guides/webhook-created.png b/docs/v3/img/guides/webhook-created.png
similarity index 100%
rename from docs/3.0/img/guides/webhook-created.png
rename to docs/v3/img/guides/webhook-created.png
diff --git a/docs/3.0/img/guides/webhook-simple.png b/docs/v3/img/guides/webhook-simple.png
similarity index 100%
rename from docs/3.0/img/guides/webhook-simple.png
rename to docs/v3/img/guides/webhook-simple.png
diff --git a/docs/3.0/img/integrations/alert.png b/docs/v3/img/integrations/alert.png
similarity index 100%
rename from docs/3.0/img/integrations/alert.png
rename to docs/v3/img/integrations/alert.png
diff --git a/docs/3.0/img/integrations/aws.png b/docs/v3/img/integrations/aws.png
similarity index 100%
rename from docs/3.0/img/integrations/aws.png
rename to docs/v3/img/integrations/aws.png
diff --git a/docs/3.0/img/integrations/azure.png b/docs/v3/img/integrations/azure.png
similarity index 100%
rename from docs/3.0/img/integrations/azure.png
rename to docs/v3/img/integrations/azure.png
diff --git a/docs/3.0/img/integrations/bitbucket.png b/docs/v3/img/integrations/bitbucket.png
similarity index 100%
rename from docs/3.0/img/integrations/bitbucket.png
rename to docs/v3/img/integrations/bitbucket.png
diff --git a/docs/3.0/img/integrations/coiled.png b/docs/v3/img/integrations/coiled.png
similarity index 100%
rename from docs/3.0/img/integrations/coiled.png
rename to docs/v3/img/integrations/coiled.png
diff --git a/docs/3.0/img/integrations/cubejs.png b/docs/v3/img/integrations/cubejs.png
similarity index 100%
rename from docs/3.0/img/integrations/cubejs.png
rename to docs/v3/img/integrations/cubejs.png
diff --git a/docs/3.0/img/integrations/dask.png b/docs/v3/img/integrations/dask.png
similarity index 100%
rename from docs/3.0/img/integrations/dask.png
rename to docs/v3/img/integrations/dask.png
diff --git a/docs/3.0/img/integrations/databricks.png b/docs/v3/img/integrations/databricks.png
similarity index 100%
rename from docs/3.0/img/integrations/databricks.png
rename to docs/v3/img/integrations/databricks.png
diff --git a/docs/3.0/img/integrations/dbt.png b/docs/v3/img/integrations/dbt.png
similarity index 100%
rename from docs/3.0/img/integrations/dbt.png
rename to docs/v3/img/integrations/dbt.png
diff --git a/docs/3.0/img/integrations/dlthub.png b/docs/v3/img/integrations/dlthub.png
similarity index 100%
rename from docs/3.0/img/integrations/dlthub.png
rename to docs/v3/img/integrations/dlthub.png
diff --git a/docs/3.0/img/integrations/docker.png b/docs/v3/img/integrations/docker.png
similarity index 100%
rename from docs/3.0/img/integrations/docker.png
rename to docs/v3/img/integrations/docker.png
diff --git a/docs/3.0/img/integrations/email.png b/docs/v3/img/integrations/email.png
similarity index 100%
rename from docs/3.0/img/integrations/email.png
rename to docs/v3/img/integrations/email.png
diff --git a/docs/3.0/img/integrations/fivetran.png b/docs/v3/img/integrations/fivetran.png
similarity index 100%
rename from docs/3.0/img/integrations/fivetran.png
rename to docs/v3/img/integrations/fivetran.png
diff --git a/docs/3.0/img/integrations/gcp.png b/docs/v3/img/integrations/gcp.png
similarity index 100%
rename from docs/3.0/img/integrations/gcp.png
rename to docs/v3/img/integrations/gcp.png
diff --git a/docs/3.0/img/integrations/github.png b/docs/v3/img/integrations/github.png
similarity index 100%
rename from docs/3.0/img/integrations/github.png
rename to docs/v3/img/integrations/github.png
diff --git a/docs/3.0/img/integrations/gitlab.png b/docs/v3/img/integrations/gitlab.png
similarity index 100%
rename from docs/3.0/img/integrations/gitlab.png
rename to docs/v3/img/integrations/gitlab.png
diff --git a/docs/3.0/img/integrations/gsheets.png b/docs/v3/img/integrations/gsheets.png
similarity index 100%
rename from docs/3.0/img/integrations/gsheets.png
rename to docs/v3/img/integrations/gsheets.png
diff --git a/docs/3.0/img/integrations/kubernetes.png b/docs/v3/img/integrations/kubernetes.png
similarity index 100%
rename from docs/3.0/img/integrations/kubernetes.png
rename to docs/v3/img/integrations/kubernetes.png
diff --git a/docs/3.0/img/integrations/metricflow.png b/docs/v3/img/integrations/metricflow.png
similarity index 100%
rename from docs/3.0/img/integrations/metricflow.png
rename to docs/v3/img/integrations/metricflow.png
diff --git a/docs/3.0/img/integrations/microsoft.png b/docs/v3/img/integrations/microsoft.png
similarity index 100%
rename from docs/3.0/img/integrations/microsoft.png
rename to docs/v3/img/integrations/microsoft.png
diff --git a/docs/3.0/img/integrations/mitmproxy.png b/docs/v3/img/integrations/mitmproxy.png
similarity index 100%
rename from docs/3.0/img/integrations/mitmproxy.png
rename to docs/v3/img/integrations/mitmproxy.png
diff --git a/docs/3.0/img/integrations/nasa.png b/docs/v3/img/integrations/nasa.png
similarity index 100%
rename from docs/3.0/img/integrations/nasa.png
rename to docs/v3/img/integrations/nasa.png
diff --git a/docs/3.0/img/integrations/prefect-kv.svg b/docs/v3/img/integrations/prefect-kv.svg
similarity index 100%
rename from docs/3.0/img/integrations/prefect-kv.svg
rename to docs/v3/img/integrations/prefect-kv.svg
diff --git a/docs/3.0/img/integrations/prefect.png b/docs/v3/img/integrations/prefect.png
similarity index 100%
rename from docs/3.0/img/integrations/prefect.png
rename to docs/v3/img/integrations/prefect.png
diff --git a/docs/3.0/img/integrations/python.png b/docs/v3/img/integrations/python.png
similarity index 100%
rename from docs/3.0/img/integrations/python.png
rename to docs/v3/img/integrations/python.png
diff --git a/docs/3.0/img/integrations/ray.png b/docs/v3/img/integrations/ray.png
similarity index 100%
rename from docs/3.0/img/integrations/ray.png
rename to docs/v3/img/integrations/ray.png
diff --git a/docs/3.0/img/integrations/shell.png b/docs/v3/img/integrations/shell.png
similarity index 100%
rename from docs/3.0/img/integrations/shell.png
rename to docs/v3/img/integrations/shell.png
diff --git a/docs/3.0/img/integrations/sifflet.png b/docs/v3/img/integrations/sifflet.png
similarity index 100%
rename from docs/3.0/img/integrations/sifflet.png
rename to docs/v3/img/integrations/sifflet.png
diff --git a/docs/3.0/img/integrations/slack.png b/docs/v3/img/integrations/slack.png
similarity index 100%
rename from docs/3.0/img/integrations/slack.png
rename to docs/v3/img/integrations/slack.png
diff --git a/docs/3.0/img/integrations/snowflake.png b/docs/v3/img/integrations/snowflake.png
similarity index 100%
rename from docs/3.0/img/integrations/snowflake.png
rename to docs/v3/img/integrations/snowflake.png
diff --git a/docs/3.0/img/integrations/soda.png b/docs/v3/img/integrations/soda.png
similarity index 100%
rename from docs/3.0/img/integrations/soda.png
rename to docs/v3/img/integrations/soda.png
diff --git a/docs/3.0/img/integrations/spark-on-kubernetes.png b/docs/v3/img/integrations/spark-on-kubernetes.png
similarity index 100%
rename from docs/3.0/img/integrations/spark-on-kubernetes.png
rename to docs/v3/img/integrations/spark-on-kubernetes.png
diff --git a/docs/3.0/img/integrations/sqlalchemy.png b/docs/v3/img/integrations/sqlalchemy.png
similarity index 100%
rename from docs/3.0/img/integrations/sqlalchemy.png
rename to docs/v3/img/integrations/sqlalchemy.png
diff --git a/docs/3.0/img/integrations/stitch.png b/docs/v3/img/integrations/stitch.png
similarity index 100%
rename from docs/3.0/img/integrations/stitch.png
rename to docs/v3/img/integrations/stitch.png
diff --git a/docs/3.0/img/integrations/terraform.png b/docs/v3/img/integrations/terraform.png
similarity index 100%
rename from docs/3.0/img/integrations/terraform.png
rename to docs/v3/img/integrations/terraform.png
diff --git a/docs/3.0/img/integrations/transform.png b/docs/v3/img/integrations/transform.png
similarity index 100%
rename from docs/3.0/img/integrations/transform.png
rename to docs/v3/img/integrations/transform.png
diff --git a/docs/3.0/img/integrations/vault.png b/docs/v3/img/integrations/vault.png
similarity index 100%
rename from docs/3.0/img/integrations/vault.png
rename to docs/v3/img/integrations/vault.png
diff --git a/docs/3.0/img/logos/prefect-logo-white-social-cards.svg b/docs/v3/img/logos/prefect-logo-white-social-cards.svg
similarity index 100%
rename from docs/3.0/img/logos/prefect-logo-white-social-cards.svg
rename to docs/v3/img/logos/prefect-logo-white-social-cards.svg
diff --git a/docs/3.0/img/logos/prefect-logo-white.svg b/docs/v3/img/logos/prefect-logo-white.svg
similarity index 100%
rename from docs/3.0/img/logos/prefect-logo-white.svg
rename to docs/v3/img/logos/prefect-logo-white.svg
diff --git a/docs/3.0/img/orchestration/flow-states.png b/docs/v3/img/orchestration/flow-states.png
similarity index 100%
rename from docs/3.0/img/orchestration/flow-states.png
rename to docs/v3/img/orchestration/flow-states.png
diff --git a/docs/3.0/img/orchestration/hello-flow-viz.png b/docs/v3/img/orchestration/hello-flow-viz.png
similarity index 100%
rename from docs/3.0/img/orchestration/hello-flow-viz.png
rename to docs/v3/img/orchestration/hello-flow-viz.png
diff --git a/docs/3.0/img/orchestration/task-states.png b/docs/v3/img/orchestration/task-states.png
similarity index 100%
rename from docs/3.0/img/orchestration/task-states.png
rename to docs/v3/img/orchestration/task-states.png
diff --git a/docs/3.0/img/orchestration/viz-return-value-tracked.png b/docs/v3/img/orchestration/viz-return-value-tracked.png
similarity index 100%
rename from docs/3.0/img/orchestration/viz-return-value-tracked.png
rename to docs/v3/img/orchestration/viz-return-value-tracked.png
diff --git a/docs/3.0/img/ui/access-control.png b/docs/v3/img/ui/access-control.png
similarity index 100%
rename from docs/3.0/img/ui/access-control.png
rename to docs/v3/img/ui/access-control.png
diff --git a/docs/3.0/img/ui/all-workspaces.png b/docs/v3/img/ui/all-workspaces.png
similarity index 100%
rename from docs/3.0/img/ui/all-workspaces.png
rename to docs/v3/img/ui/all-workspaces.png
diff --git a/docs/3.0/img/ui/audit-log.png b/docs/v3/img/ui/audit-log.png
similarity index 100%
rename from docs/3.0/img/ui/audit-log.png
rename to docs/v3/img/ui/audit-log.png
diff --git a/docs/3.0/img/ui/automation-from-event.png b/docs/v3/img/ui/automation-from-event.png
similarity index 100%
rename from docs/3.0/img/ui/automation-from-event.png
rename to docs/v3/img/ui/automation-from-event.png
diff --git a/docs/3.0/img/ui/automations-action-job-variable.png b/docs/v3/img/ui/automations-action-job-variable.png
similarity index 100%
rename from docs/3.0/img/ui/automations-action-job-variable.png
rename to docs/v3/img/ui/automations-action-job-variable.png
diff --git a/docs/3.0/img/ui/automations-action.png b/docs/v3/img/ui/automations-action.png
similarity index 100%
rename from docs/3.0/img/ui/automations-action.png
rename to docs/v3/img/ui/automations-action.png
diff --git a/docs/3.0/img/ui/automations-custom.png b/docs/v3/img/ui/automations-custom.png
similarity index 100%
rename from docs/3.0/img/ui/automations-custom.png
rename to docs/v3/img/ui/automations-custom.png
diff --git a/docs/3.0/img/ui/automations-notifications.png b/docs/v3/img/ui/automations-notifications.png
similarity index 100%
rename from docs/3.0/img/ui/automations-notifications.png
rename to docs/v3/img/ui/automations-notifications.png
diff --git a/docs/3.0/img/ui/automations-trigger.png b/docs/v3/img/ui/automations-trigger.png
similarity index 100%
rename from docs/3.0/img/ui/automations-trigger.png
rename to docs/v3/img/ui/automations-trigger.png
diff --git a/docs/3.0/img/ui/automations.png b/docs/v3/img/ui/automations.png
similarity index 100%
rename from docs/3.0/img/ui/automations.png
rename to docs/v3/img/ui/automations.png
diff --git a/docs/3.0/img/ui/block-library.png b/docs/v3/img/ui/block-library.png
similarity index 100%
rename from docs/3.0/img/ui/block-library.png
rename to docs/v3/img/ui/block-library.png
diff --git a/docs/3.0/img/ui/cloud-api-keys.png b/docs/v3/img/ui/cloud-api-keys.png
similarity index 100%
rename from docs/3.0/img/ui/cloud-api-keys.png
rename to docs/v3/img/ui/cloud-api-keys.png
diff --git a/docs/3.0/img/ui/cloud-dashboard.png b/docs/v3/img/ui/cloud-dashboard.png
similarity index 100%
rename from docs/3.0/img/ui/cloud-dashboard.png
rename to docs/v3/img/ui/cloud-dashboard.png
diff --git a/docs/3.0/img/ui/cloud-new-api-key.png b/docs/v3/img/ui/cloud-new-api-key.png
similarity index 100%
rename from docs/3.0/img/ui/cloud-new-api-key.png
rename to docs/v3/img/ui/cloud-new-api-key.png
diff --git a/docs/3.0/img/ui/cloud-new-workspace.png b/docs/v3/img/ui/cloud-new-workspace.png
similarity index 100%
rename from docs/3.0/img/ui/cloud-new-workspace.png
rename to docs/v3/img/ui/cloud-new-workspace.png
diff --git a/docs/3.0/img/ui/cloud-sign-in.png b/docs/v3/img/ui/cloud-sign-in.png
similarity index 100%
rename from docs/3.0/img/ui/cloud-sign-in.png
rename to docs/v3/img/ui/cloud-sign-in.png
diff --git a/docs/3.0/img/ui/cloud-sso-dashboard.png b/docs/v3/img/ui/cloud-sso-dashboard.png
similarity index 100%
rename from docs/3.0/img/ui/cloud-sso-dashboard.png
rename to docs/v3/img/ui/cloud-sso-dashboard.png
diff --git a/docs/3.0/img/ui/cloud-sso-provider.png b/docs/v3/img/ui/cloud-sso-provider.png
similarity index 100%
rename from docs/3.0/img/ui/cloud-sso-provider.png
rename to docs/v3/img/ui/cloud-sso-provider.png
diff --git a/docs/3.0/img/ui/cloud-workspace-details.png b/docs/v3/img/ui/cloud-workspace-details.png
similarity index 100%
rename from docs/3.0/img/ui/cloud-workspace-details.png
rename to docs/v3/img/ui/cloud-workspace-details.png
diff --git a/docs/3.0/img/ui/create-email-notification.png b/docs/v3/img/ui/create-email-notification.png
similarity index 100%
rename from docs/3.0/img/ui/create-email-notification.png
rename to docs/v3/img/ui/create-email-notification.png
diff --git a/docs/3.0/img/ui/create-workspace.png b/docs/v3/img/ui/create-workspace.png
similarity index 100%
rename from docs/3.0/img/ui/create-workspace.png
rename to docs/v3/img/ui/create-workspace.png
diff --git a/docs/3.0/img/ui/dashboard-cloud.png b/docs/v3/img/ui/dashboard-cloud.png
similarity index 100%
rename from docs/3.0/img/ui/dashboard-cloud.png
rename to docs/v3/img/ui/dashboard-cloud.png
diff --git a/docs/3.0/img/ui/dashboard-oss.png b/docs/v3/img/ui/dashboard-oss.png
similarity index 100%
rename from docs/3.0/img/ui/dashboard-oss.png
rename to docs/v3/img/ui/dashboard-oss.png
diff --git a/docs/3.0/img/ui/declare-incident.png b/docs/v3/img/ui/declare-incident.png
similarity index 100%
rename from docs/3.0/img/ui/declare-incident.png
rename to docs/v3/img/ui/declare-incident.png
diff --git a/docs/3.0/img/ui/dependency-graph.png b/docs/v3/img/ui/dependency-graph.png
similarity index 100%
rename from docs/3.0/img/ui/dependency-graph.png
rename to docs/v3/img/ui/dependency-graph.png
diff --git a/docs/3.0/img/ui/deployment-cron-schedule.png b/docs/v3/img/ui/deployment-cron-schedule.png
similarity index 100%
rename from docs/3.0/img/ui/deployment-cron-schedule.png
rename to docs/v3/img/ui/deployment-cron-schedule.png
diff --git a/docs/3.0/img/ui/deployment-details.png b/docs/v3/img/ui/deployment-details.png
similarity index 100%
rename from docs/3.0/img/ui/deployment-details.png
rename to docs/v3/img/ui/deployment-details.png
diff --git a/docs/3.0/img/ui/deployment-job-variables.png b/docs/v3/img/ui/deployment-job-variables.png
similarity index 100%
rename from docs/3.0/img/ui/deployment-job-variables.png
rename to docs/v3/img/ui/deployment-job-variables.png
diff --git a/docs/3.0/img/ui/deployment-managed.png b/docs/v3/img/ui/deployment-managed.png
similarity index 100%
rename from docs/3.0/img/ui/deployment-managed.png
rename to docs/v3/img/ui/deployment-managed.png
diff --git a/docs/3.0/img/ui/event-feed.png b/docs/v3/img/ui/event-feed.png
similarity index 100%
rename from docs/3.0/img/ui/event-feed.png
rename to docs/v3/img/ui/event-feed.png
diff --git a/docs/3.0/img/ui/event-spec.png b/docs/v3/img/ui/event-spec.png
similarity index 100%
rename from docs/3.0/img/ui/event-spec.png
rename to docs/v3/img/ui/event-spec.png
diff --git a/docs/3.0/img/ui/flow-run-cancellation-ui.png b/docs/v3/img/ui/flow-run-cancellation-ui.png
similarity index 100%
rename from docs/3.0/img/ui/flow-run-cancellation-ui.png
rename to docs/v3/img/ui/flow-run-cancellation-ui.png
diff --git a/docs/3.0/img/ui/flow-run-details.png b/docs/v3/img/ui/flow-run-details.png
similarity index 100%
rename from docs/3.0/img/ui/flow-run-details.png
rename to docs/v3/img/ui/flow-run-details.png
diff --git a/docs/3.0/img/ui/flow-run-diagram.jpg b/docs/v3/img/ui/flow-run-diagram.jpg
similarity index 100%
rename from docs/3.0/img/ui/flow-run-diagram.jpg
rename to docs/v3/img/ui/flow-run-diagram.jpg
diff --git a/docs/3.0/img/ui/flow-run-diagram.png b/docs/v3/img/ui/flow-run-diagram.png
similarity index 100%
rename from docs/3.0/img/ui/flow-run-diagram.png
rename to docs/v3/img/ui/flow-run-diagram.png
diff --git a/docs/3.0/img/ui/flow-run-examples.png b/docs/v3/img/ui/flow-run-examples.png
similarity index 100%
rename from docs/3.0/img/ui/flow-run-examples.png
rename to docs/v3/img/ui/flow-run-examples.png
diff --git a/docs/3.0/img/ui/flow-run-page-server.png b/docs/v3/img/ui/flow-run-page-server.png
similarity index 100%
rename from docs/3.0/img/ui/flow-run-page-server.png
rename to docs/v3/img/ui/flow-run-page-server.png
diff --git a/docs/3.0/img/ui/flows-icon.png b/docs/v3/img/ui/flows-icon.png
similarity index 100%
rename from docs/3.0/img/ui/flows-icon.png
rename to docs/v3/img/ui/flows-icon.png
diff --git a/docs/3.0/img/ui/healthy-work-queue.png b/docs/v3/img/ui/healthy-work-queue.png
similarity index 100%
rename from docs/3.0/img/ui/healthy-work-queue.png
rename to docs/v3/img/ui/healthy-work-queue.png
diff --git a/docs/3.0/img/ui/image-artifact-example.png b/docs/v3/img/ui/image-artifact-example.png
similarity index 100%
rename from docs/3.0/img/ui/image-artifact-example.png
rename to docs/v3/img/ui/image-artifact-example.png
diff --git a/docs/3.0/img/ui/incidents-dashboard.png b/docs/v3/img/ui/incidents-dashboard.png
similarity index 100%
rename from docs/3.0/img/ui/incidents-dashboard.png
rename to docs/v3/img/ui/incidents-dashboard.png
diff --git a/docs/3.0/img/ui/interval-schedule.png b/docs/v3/img/ui/interval-schedule.png
similarity index 100%
rename from docs/3.0/img/ui/interval-schedule.png
rename to docs/v3/img/ui/interval-schedule.png
diff --git a/docs/3.0/img/ui/link-artifact-info.png b/docs/v3/img/ui/link-artifact-info.png
similarity index 100%
rename from docs/3.0/img/ui/link-artifact-info.png
rename to docs/v3/img/ui/link-artifact-info.png
diff --git a/docs/3.0/img/ui/md-artifact-info.png b/docs/v3/img/ui/md-artifact-info.png
similarity index 100%
rename from docs/3.0/img/ui/md-artifact-info.png
rename to docs/v3/img/ui/md-artifact-info.png
diff --git a/docs/3.0/img/ui/notifications.png b/docs/v3/img/ui/notifications.png
similarity index 100%
rename from docs/3.0/img/ui/notifications.png
rename to docs/v3/img/ui/notifications.png
diff --git a/docs/3.0/img/ui/org-inherited-role.png b/docs/v3/img/ui/org-inherited-role.png
similarity index 100%
rename from docs/3.0/img/ui/org-inherited-role.png
rename to docs/v3/img/ui/org-inherited-role.png
diff --git a/docs/3.0/img/ui/org-invite-members.png b/docs/v3/img/ui/org-invite-members.png
similarity index 100%
rename from docs/3.0/img/ui/org-invite-members.png
rename to docs/v3/img/ui/org-invite-members.png
diff --git a/docs/3.0/img/ui/organizations.png b/docs/v3/img/ui/organizations.png
similarity index 100%
rename from docs/3.0/img/ui/organizations.png
rename to docs/v3/img/ui/organizations.png
diff --git a/docs/3.0/img/ui/process-work-pool-config.png b/docs/v3/img/ui/process-work-pool-config.png
similarity index 100%
rename from docs/3.0/img/ui/process-work-pool-config.png
rename to docs/v3/img/ui/process-work-pool-config.png
diff --git a/docs/3.0/img/ui/progress-artifact-example.png b/docs/v3/img/ui/progress-artifact-example.png
similarity index 100%
rename from docs/3.0/img/ui/progress-artifact-example.png
rename to docs/v3/img/ui/progress-artifact-example.png
diff --git a/docs/3.0/img/ui/qs-flow-run.png b/docs/v3/img/ui/qs-flow-run.png
similarity index 100%
rename from docs/3.0/img/ui/qs-flow-run.png
rename to docs/v3/img/ui/qs-flow-run.png
diff --git a/docs/3.0/img/ui/select-account.png b/docs/v3/img/ui/select-account.png
similarity index 100%
rename from docs/3.0/img/ui/select-account.png
rename to docs/v3/img/ui/select-account.png
diff --git a/docs/3.0/img/ui/self-hosted-server-dashboard.png b/docs/v3/img/ui/self-hosted-server-dashboard.png
similarity index 100%
rename from docs/3.0/img/ui/self-hosted-server-dashboard.png
rename to docs/v3/img/ui/self-hosted-server-dashboard.png
diff --git a/docs/3.0/img/ui/service-accounts.png b/docs/v3/img/ui/service-accounts.png
similarity index 100%
rename from docs/3.0/img/ui/service-accounts.png
rename to docs/v3/img/ui/service-accounts.png
diff --git a/docs/3.0/img/ui/sso-scim-enabled.png b/docs/v3/img/ui/sso-scim-enabled.png
similarity index 100%
rename from docs/3.0/img/ui/sso-scim-enabled.png
rename to docs/v3/img/ui/sso-scim-enabled.png
diff --git a/docs/3.0/img/ui/table-artifact-info.png b/docs/v3/img/ui/table-artifact-info.png
similarity index 100%
rename from docs/3.0/img/ui/table-artifact-info.png
rename to docs/v3/img/ui/table-artifact-info.png
diff --git a/docs/3.0/img/ui/teams.png b/docs/v3/img/ui/teams.png
similarity index 100%
rename from docs/3.0/img/ui/teams.png
rename to docs/v3/img/ui/teams.png
diff --git a/docs/3.0/img/ui/templated-notification.png b/docs/v3/img/ui/templated-notification.png
similarity index 100%
rename from docs/3.0/img/ui/templated-notification.png
rename to docs/v3/img/ui/templated-notification.png
diff --git a/docs/3.0/img/ui/timeline-flows.png b/docs/v3/img/ui/timeline-flows.png
similarity index 100%
rename from docs/3.0/img/ui/timeline-flows.png
rename to docs/v3/img/ui/timeline-flows.png
diff --git a/docs/3.0/img/ui/variables-ui.png b/docs/v3/img/ui/variables-ui.png
similarity index 100%
rename from docs/3.0/img/ui/variables-ui.png
rename to docs/v3/img/ui/variables-ui.png
diff --git a/docs/3.0/img/ui/webhook.png b/docs/v3/img/ui/webhook.png
similarity index 100%
rename from docs/3.0/img/ui/webhook.png
rename to docs/v3/img/ui/webhook.png
diff --git a/docs/3.0/img/ui/work-pool-list.png b/docs/v3/img/ui/work-pool-list.png
similarity index 100%
rename from docs/3.0/img/ui/work-pool-list.png
rename to docs/v3/img/ui/work-pool-list.png
diff --git a/docs/3.0/img/ui/work-pools.png b/docs/v3/img/ui/work-pools.png
similarity index 100%
rename from docs/3.0/img/ui/work-pools.png
rename to docs/v3/img/ui/work-pools.png
diff --git a/docs/3.0/img/ui/work-queues-icon.png b/docs/v3/img/ui/work-queues-icon.png
similarity index 100%
rename from docs/3.0/img/ui/work-queues-icon.png
rename to docs/v3/img/ui/work-queues-icon.png
diff --git a/docs/3.0/img/ui/workspace-collaborators.png b/docs/v3/img/ui/workspace-collaborators.png
similarity index 100%
rename from docs/3.0/img/ui/workspace-collaborators.png
rename to docs/v3/img/ui/workspace-collaborators.png
diff --git a/docs/3.0/img/ui/workspace-icon.png b/docs/v3/img/ui/workspace-icon.png
similarity index 100%
rename from docs/3.0/img/ui/workspace-icon.png
rename to docs/v3/img/ui/workspace-icon.png
diff --git a/docs/3.0/img/ui/workspace-settings.png b/docs/v3/img/ui/workspace-settings.png
similarity index 100%
rename from docs/3.0/img/ui/workspace-settings.png
rename to docs/v3/img/ui/workspace-settings.png
diff --git a/docs/3.0/img/ui/workspace-sharing.png b/docs/v3/img/ui/workspace-sharing.png
similarity index 100%
rename from docs/3.0/img/ui/workspace-sharing.png
rename to docs/v3/img/ui/workspace-sharing.png
diff --git a/docs/3.0/img/ui/workspace-transfer.png b/docs/v3/img/ui/workspace-transfer.png
similarity index 100%
rename from docs/3.0/img/ui/workspace-transfer.png
rename to docs/v3/img/ui/workspace-transfer.png
diff --git a/docs/3.0/manage/cloud/connect-to-cloud.mdx b/docs/v3/manage/cloud/connect-to-cloud.mdx
similarity index 75%
rename from docs/3.0/manage/cloud/connect-to-cloud.mdx
rename to docs/v3/manage/cloud/connect-to-cloud.mdx
index 5361d49f9dee..ff2e71e47220 100644
--- a/docs/3.0/manage/cloud/connect-to-cloud.mdx
+++ b/docs/v3/manage/cloud/connect-to-cloud.mdx
@@ -19,7 +19,7 @@ You will log in to a Prefect Cloud account from the local environment where you
### Steps
1. Open a new terminal session.
-2. [Install Prefect](/3.0/get-started/install/) in the environment where you want to execute flow runs.
+2. [Install Prefect](/v3/get-started/install/) in the environment where you want to execute flow runs.
```bash
pip install -U prefect
@@ -48,7 +48,7 @@ Paste your authentication key:
Authenticated with Prefect Cloud! Using workspace 'prefect/terry-prefect-workspace'.
```
-You can authenticate by manually pasting an [API key](/3.0/manage/cloud/manage-users/api-keys/) or through a browser-based approval that auto-generates an API key with a 30-day expiration.
+You can authenticate by manually pasting an [API key](/v3/manage/cloud/manage-users/api-keys/) or through a browser-based approval that auto-generates an API key with a 30-day expiration.
### Change workspaces
@@ -82,20 +82,30 @@ prefect config set PREFECT_API_URL="https://api.prefect.cloud/api/accounts/[ACCO
prefect config set PREFECT_API_KEY="[API-KEY]"
```
-When you're in a Prefect Cloud workspace, you can copy the `PREFECT_API_URL` value directly from the page URL.
+
+**Find account ID and workspace ID in the browser**
+
+When authenticated to your Prefect Cloud workspace in the browser, you can get the account ID and workspace ID for the `PREFECT_API_URL` string from the page URL.
+
+For example, if you're on the dashboard page, the URL looks like this:
+
+https://app.prefect.cloud/account/[ACCOUNT-ID]/workspace/[WORKSPACE-ID]/dashboard
+
+
+The example above configures `PREFECT_API_URL` and `PREFECT_API_KEY` in the default profile.
-This example configures `PREFECT_API_URL` and `PREFECT_API_KEY` in the default profile.
You can use `prefect profile` CLI commands to create settings profiles for different configurations.
For example, you can configure a "cloud" profile to use the Prefect Cloud API URL and API key,
and another "local" profile for local development using a local Prefect API server started with `prefect server start`.
-See [Settings](/3.0/manage/settings-and-profiles/) for details.
+See [Settings](/v3/develop/settings-and-profiles/) for details.
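+As a brief sketch of that workflow (the profile name and bracketed values are placeholders):
+
+```bash
+prefect profile create cloud
+prefect profile use cloud
+prefect config set PREFECT_API_URL="https://api.prefect.cloud/api/accounts/[ACCOUNT-ID]/workspaces/[WORKSPACE-ID]"
+prefect config set PREFECT_API_KEY="[API-KEY]"
+```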
**Environment variables**
You can set `PREFECT_API_URL` and `PREFECT_API_KEY` just like any other environment variable.
-See [Overriding defaults with environment variables](/3.0/manage/settings-and-profiles/)
-for more information.
+Setting these environment variables is a good way to connect to Prefect Cloud in a remote serverless environment.
+See [Overriding defaults with environment variables](/v3/develop/settings-and-profiles/) for more information.
+
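+As a minimal sketch (the bracketed values are placeholders), exporting the variables in a shell looks like:
+
+```bash
+export PREFECT_API_URL="https://api.prefect.cloud/api/accounts/[ACCOUNT-ID]/workspaces/[WORKSPACE-ID]"
+export PREFECT_API_KEY="[API-KEY]"
+```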
## Install requirements in execution environments
diff --git a/docs/v3/manage/cloud/index.mdx b/docs/v3/manage/cloud/index.mdx
new file mode 100644
index 000000000000..15afb4eae53a
--- /dev/null
+++ b/docs/v3/manage/cloud/index.mdx
@@ -0,0 +1,47 @@
+---
+title: Prefect Cloud overview
+description: Observe and orchestrate workflow applications with the Prefect Cloud platform.
+---
+
+Prefect Cloud is a hosted workflow application platform with all the capabilities of the open source Prefect server plus additional features.
+
+- [Connect to Prefect Cloud](/v3/manage/cloud/connect-to-cloud/) shows how to configure a local execution environment to access Prefect Cloud.
+
+The pages that follow provide detailed information about the features and capabilities of Prefect Cloud:
+
+- [Manage workspaces](/v3/manage/cloud/workspaces/) shows how to use isolated environments for organizing activity.
+
+The **Manage accounts** subsection covers aspects of Prefect Cloud accounts:
+- [Manage user accounts](/v3/manage/cloud/manage-users/) explains how to use Prefect Cloud's personal accounts.
+- [Manage service accounts](/v3/manage/cloud/manage-users/service-accounts/) describes how to configure API access for running workers or executing flow runs on remote infrastructure through API keys that are not associated with a user account.
+- [Manage teams](/v3/manage/cloud/manage-users/manage-teams/) demonstrates how groups of users can be managed together.
+- [Manage account roles](/v3/manage/cloud/manage-users/manage-roles/) shows how to use role-based access controls (RBAC): granular permissions to perform certain activities within an account or a workspace. Enterprise plans allow for custom roles with specific permissions.
+- [Manage API keys](/v3/manage/cloud/manage-users/api-keys/) explains how to grant access to Prefect Cloud from a local execution environment.
+- [Configure single sign-on](/v3/manage/cloud/manage-users/configure-sso/) describes how single sign-on (SSO) authentication integration is supported through identity providers with OIDC and SAML. Directory sync and SCIM provisioning are also available.
+- [Audit Cloud activity](/v3/manage/cloud/manage-users/audit-logs/) shows how Prefect provides a record of user activities to monitor for security and compliance.
+- [Object access control lists](/v3/manage/cloud/manage-users/object-access-control-lists/) (ACLs) allow privileged users to restrict access to specific deployments and blocks for individual users.
+- [Secure access by IP](/v3/manage/cloud/manage-users/secure-access-by-ip-address/) shows how to restrict access to Prefect Cloud by IP address.
+
+The remaining pages in this section cover other aspects of Prefect Cloud:
+
+- [Rate limits and data retention](/v3/manage/cloud/rate-limits/) discusses rate limits and data retention for Prefect Cloud.
+- [Terraform provider](https://registry.terraform.io/providers/PrefectHQ/prefect/latest/docs/guides/getting-started) links to the Prefect Cloud Terraform provider docs for infrastructure as code.
+- [Troubleshoot Prefect Cloud](/v3/manage/cloud/troubleshoot-cloud/) contains common solutions for troubleshooting Prefect Cloud.
+
+Pages relevant to Prefect Cloud found elsewhere in the documentation include:
+
+- [Push work pools](/v3/deploy/infrastructure-examples/serverless/) allow you to run flows on your serverless infrastructure without running a worker.
+- [Managed work pools](/v3/deploy/infrastructure-examples/managed/) allow you to run flows on Prefect's infrastructure without running a worker.
+- [Webhooks](/v3/automate/events/webhook-triggers/) receive events from other systems and translate them into Prefect events.
+- [Incidents](/v3/automate/incidents/) are a way to track and manage issues that arise during flow runs.
+
+Error summaries (enabled by Marvin AI) distill the error logs of `Failed` and `Crashed` flow runs into actionable information. To enable this feature, visit the **Settings** page for your account.
+
+The [Prefect Cloud REST API](/v3/api-ref/rest-api/) is used to send and receive data for orchestration and monitoring between Prefect clients and Prefect Cloud.
+Interactive Prefect Cloud REST API documentation is available at https://app.prefect.cloud/api/docs.
+
+
+## Try Prefect Cloud
+
+To try Prefect Cloud, create a free account at [app.prefect.cloud](https://app.prefect.cloud/).
+Follow the steps in the UI to create and run your first workflow in Prefect Cloud.
diff --git a/docs/3.0/manage/cloud/manage-users/api-keys.mdx b/docs/v3/manage/cloud/manage-users/api-keys.mdx
similarity index 78%
rename from docs/3.0/manage/cloud/manage-users/api-keys.mdx
rename to docs/v3/manage/cloud/manage-users/api-keys.mdx
index b6a93271cd39..f6124a7cde70 100644
--- a/docs/3.0/manage/cloud/manage-users/api-keys.mdx
+++ b/docs/v3/manage/cloud/manage-users/api-keys.mdx
@@ -9,7 +9,7 @@ If you run `prefect cloud login` from your CLI, you can authenticate through you
Authenticating through the browser directs you to an authorization page.
After you grant approval to connect, you're redirected to the CLI and the API key is saved to your local
-[Prefect profile](/3.0/manage/settings-and-profiles/).
+[Prefect profile](/v3/develop/settings-and-profiles/).
If you choose to authenticate by pasting an API key, you must create an API key in the Prefect Cloud UI first.
@@ -25,12 +25,16 @@ Provide a name for the key and an expiration date.
Copy the key to a secure location since an API key cannot be revealed again in the UI after it is generated.
-## Log into Prefect Cloud with an API Key
+## Log in to Prefect Cloud with an API Key
```bash
prefect cloud login -k '<YOUR-API-KEY>'
```
+Alternatively, if you don't have a CLI available - for example, if you are connecting to Prefect Cloud within a remote serverless environment - set the `PREFECT_API_KEY` environment variable.
+
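+As a sketch (the key value is a placeholder; user API keys start with `pnu_`):
+
+```bash
+export PREFECT_API_KEY="pnu_XXXXXXXX"
+```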
+For more information see [Connect to Prefect Cloud](/v3/manage/cloud/connect-to-cloud).
+
## Service account API keys (Pro) (Enterprise)
Service accounts are a feature of Prefect Cloud [Pro and Enterprise tier plans](https://www.prefect.io/pricing) that enable you to
@@ -40,4 +44,4 @@ Service accounts are useful for configuring API access for running workers, or e
Events and logs for flow runs in those environments are associated with the service account rather than a user. Manage or revoke API access by
configuring or removing the service account without disrupting user access.
-See [service accounts](/3.0/manage/cloud/manage-users/service-accounts/) for more information.
+See [service accounts](/v3/manage/cloud/manage-users/service-accounts/) for more information.
diff --git a/docs/3.0/manage/cloud/manage-users/audit-logs.mdx b/docs/v3/manage/cloud/manage-users/audit-logs.mdx
similarity index 100%
rename from docs/3.0/manage/cloud/manage-users/audit-logs.mdx
rename to docs/v3/manage/cloud/manage-users/audit-logs.mdx
diff --git a/docs/3.0/manage/cloud/manage-users/configure-sso.mdx b/docs/v3/manage/cloud/manage-users/configure-sso.mdx
similarity index 95%
rename from docs/3.0/manage/cloud/manage-users/configure-sso.mdx
rename to docs/v3/manage/cloud/manage-users/configure-sso.mdx
index 94f132c7f8e8..5e6c9ca39ab7 100644
--- a/docs/3.0/manage/cloud/manage-users/configure-sso.mdx
+++ b/docs/v3/manage/cloud/manage-users/configure-sso.mdx
@@ -31,7 +31,7 @@ This step creates a link to configure SSO with your identity provider.
Using the provided link, navigate to the Identity Provider Configuration dashboard and select your identity provider to continue
configuration. If your provider isn't listed, try `SAML` or `Open ID Connect` instead.
-![Opening the Identity Provider Configuration dashboard.](/3.0/img/ui/cloud-sso-dashboard.png)
+![Opening the Identity Provider Configuration dashboard.](/v3/img/ui/cloud-sso-dashboard.png)
Once you complete SSO configuration, your users must authenticate through your identity provider when accessing account resources, giving you full control over application access.
diff --git a/docs/3.0/manage/cloud/manage-users/index.mdx b/docs/v3/manage/cloud/manage-users/index.mdx
similarity index 67%
rename from docs/3.0/manage/cloud/manage-users/index.mdx
rename to docs/v3/manage/cloud/manage-users/index.mdx
index 26126fd20703..09ea8c96b5e7 100644
--- a/docs/3.0/manage/cloud/manage-users/index.mdx
+++ b/docs/v3/manage/cloud/manage-users/index.mdx
@@ -14,7 +14,7 @@ An individual user can be invited to become a member of other accounts.
You can access your personal settings in the [profile menu](https://app.prefect.cloud/my/profile), including:
- **Profile**: View and edit basic information, such as name.
-- **API keys**: Create and view [API keys](/3.0/manage/cloud/manage-users/api-keys/)
+- **API keys**: Create and view [API keys](/v3/manage/cloud/manage-users/api-keys/)
for connecting to Prefect Cloud from the CLI or other environments.
- **Preferences**: Manage settings, such as color mode and default time zone.
- **Feature previews**: Enable or disable feature previews.
@@ -25,27 +25,27 @@ Users who are part of an account can hold the role of Admin or Member.
Admins can invite other users to join the account and manage the account's workspaces and teams.
Admins on Pro and Enterprise tier Prefect Cloud accounts can grant members of the account
-[roles](/3.0/manage/cloud/manage-users/manage-roles/) in a workspace, such as Runner or Viewer.
+[roles](/v3/manage/cloud/manage-users/manage-roles/) in a workspace, such as Runner or Viewer.
Custom roles are available on Enterprise tier accounts.
## API keys
-[API keys](/3.0/manage/cloud/manage-users/api-keys/) enable you to authenticate an environment to work with Prefect Cloud.
+[API keys](/v3/manage/cloud/manage-users/api-keys/) enable you to authenticate an environment to work with Prefect Cloud.
## Service accounts (Pro) (Enterprise)
-[Service accounts](/3.0/manage/cloud/manage-users/service-accounts/) enable you to create a
+[Service accounts](/v3/manage/cloud/manage-users/service-accounts/) enable you to create a
Prefect Cloud API key that is not associated with a user account.
## Single sign-on (Pro)
-Enterprise tier plans offer [single sign-on (SSO)](/3.0/manage/cloud/manage-users/configure-sso/)
+Enterprise tier plans offer [single sign-on (SSO)](/v3/manage/cloud/manage-users/configure-sso/)
integration with your team's identity provider, including options for
-[directory sync and SCIM provisioning](/3.0/manage/cloud/manage-users/configure-sso/#directory-sync).
+[directory sync and SCIM provisioning](/v3/manage/cloud/manage-users/configure-sso/#directory-sync).
## Audit log (Pro) (Enterprise)
-[Audit logs](/3.0/manage/cloud/manage-users/audit-logs/) provide a chronological record of
+[Audit logs](/v3/manage/cloud/manage-users/audit-logs/) provide a chronological record of
activities performed by Prefect Cloud users who are members of an account.
## Object-level access control lists (Enterprise)
@@ -56,4 +56,4 @@ restrict access to specific users and service accounts within a workspace.
## Teams (Enterprise)
Users of Enterprise tier Prefect Cloud accounts can be added to
-[Teams](/3.0/manage/cloud/manage-users/manage-teams/) to simplify access control governance.
+[Teams](/v3/manage/cloud/manage-users/manage-teams/) to simplify access control governance.
diff --git a/docs/3.0/manage/cloud/manage-users/manage-roles.mdx b/docs/v3/manage/cloud/manage-users/manage-roles.mdx
similarity index 98%
rename from docs/3.0/manage/cloud/manage-users/manage-roles.mdx
rename to docs/v3/manage/cloud/manage-users/manage-roles.mdx
index 3058e763fddf..d9b380268b50 100644
--- a/docs/3.0/manage/cloud/manage-users/manage-roles.mdx
+++ b/docs/v3/manage/cloud/manage-users/manage-roles.mdx
@@ -46,8 +46,7 @@ The following built-in roles have permissions within a given workspace in Prefec
| Viewer | - View flow runs within a workspace. - View deployments within a workspace. - View all work pools within a workspace. - View all blocks within a workspace. - View all automations within a workspace. - View workspace handle and description. |
| Runner | All Viewer abilities, _plus_: - Run deployments within a workspace. |
| Developer | All Runner abilities, _plus_: - Run flows within a workspace. - Delete flow runs within a workspace. - Create, edit, and delete deployments within a workspace. - Create, edit, and delete work pools within a workspace. - Create, edit, and delete all blocks and their secrets within a workspace. - Create, edit, and delete automations within a workspace. - View all workspace settings. |
-
-| Owner | All Developer abilities, _plus_: - Add and remove account members, and set their role within a workspace. - Set the workspace's default workspace role for all users in the account. - Set, view, edit workspace settings. |
+| Owner | All Developer abilities, _plus_: - Add and remove account members, and set their role within a workspace. - Set the workspace's default workspace role for all users in the account. - Set, view, edit workspace settings. |
| Worker | The minimum scopes required for a worker to poll for and submit work.|
## Custom workspace roles
diff --git a/docs/3.0/manage/cloud/manage-users/manage-teams.mdx b/docs/v3/manage/cloud/manage-users/manage-teams.mdx
similarity index 100%
rename from docs/3.0/manage/cloud/manage-users/manage-teams.mdx
rename to docs/v3/manage/cloud/manage-users/manage-teams.mdx
diff --git a/docs/3.0/manage/cloud/manage-users/object-access-control-lists.mdx b/docs/v3/manage/cloud/manage-users/object-access-control-lists.mdx
similarity index 100%
rename from docs/3.0/manage/cloud/manage-users/object-access-control-lists.mdx
rename to docs/v3/manage/cloud/manage-users/object-access-control-lists.mdx
diff --git a/docs/3.0/manage/cloud/manage-users/secure-access-by-ip-address.mdx b/docs/v3/manage/cloud/manage-users/secure-access-by-ip-address.mdx
similarity index 100%
rename from docs/3.0/manage/cloud/manage-users/secure-access-by-ip-address.mdx
rename to docs/v3/manage/cloud/manage-users/secure-access-by-ip-address.mdx
diff --git a/docs/3.0/manage/cloud/manage-users/service-accounts.mdx b/docs/v3/manage/cloud/manage-users/service-accounts.mdx
similarity index 84%
rename from docs/3.0/manage/cloud/manage-users/service-accounts.mdx
rename to docs/v3/manage/cloud/manage-users/service-accounts.mdx
index f4bb8dc6a4a2..80fd03937fce 100644
--- a/docs/3.0/manage/cloud/manage-users/service-accounts.mdx
+++ b/docs/v3/manage/cloud/manage-users/service-accounts.mdx
@@ -8,11 +8,11 @@ Service accounts are typically used to configure API access for running workers
Service accounts are non-user accounts that have the following features:
-- Prefect Cloud [API keys](/3.0/manage/cloud/manage-users/api-keys/)
-- [Roles](/3.0/manage/cloud/manage-users/manage-roles/) and permissions
+- Prefect Cloud [API keys](/v3/manage/cloud/manage-users/api-keys/)
+- [Roles](/v3/manage/cloud/manage-users/manage-roles/) and permissions
With service account credentials, you can
-[configure an execution environment](/3.0/manage/cloud/connect-to-cloud/#configure-a-local-execution-environment)
+[configure an execution environment](/v3/manage/cloud/connect-to-cloud/#configure-a-local-execution-environment)
to interact with your Prefect Cloud workspaces without a user manually logging in from that environment.
Service accounts may be created, added to workspaces, have their roles changed, or deleted without affecting other user accounts.
You may apply any valid _workspace-level_ role to a service account.
@@ -20,7 +20,7 @@ You may apply any valid _workspace-level_ role to a service account.
Select **Service Accounts** to view, create, or edit service accounts.
Service accounts are created at the account level, but individual workspaces may be shared with the service account.
-See [workspace sharing](/3.0/manage/cloud/workspaces/#workspace-sharing) for more information.
+See [workspace sharing](/v3/manage/cloud/workspaces/#workspace-sharing) for more information.
**Service account credentials**
@@ -43,7 +43,7 @@ On the **Service Accounts** page, select the **+** icon to create a new service
A service account may only be a Member of an account.
You may apply any valid _workspace-level_ role to a service account when it is
-[added to a workspace](/3.0/manage/cloud/workspaces/#workspace-sharing).
+[added to a workspace](/v3/manage/cloud/workspaces/#workspace-sharing).
Select **Create** to create the new service account.
diff --git a/docs/3.0/manage/cloud/rate-limits.mdx b/docs/v3/manage/cloud/rate-limits.mdx
similarity index 96%
rename from docs/3.0/manage/cloud/rate-limits.mdx
rename to docs/v3/manage/cloud/rate-limits.mdx
index 3894013eff18..24d8c70091cf 100644
--- a/docs/3.0/manage/cloud/rate-limits.mdx
+++ b/docs/v3/manage/cloud/rate-limits.mdx
@@ -36,7 +36,7 @@ The retention period applies to all workspaces that belong to the account.
The retention period is the number of days that metadata is available after it is created.
For flow and task runs, it is calculated from the time the run reaches a
-[terminal state](/3.0/develop/manage-states/#state-types).
+[terminal state](/v3/develop/manage-states/#state-types).
Subflow runs are retained independently from their parent flow runs. They are removed based on
the time each subflow run reaches a terminal state.
diff --git a/docs/3.0/manage/cloud/terraform-provider.mdx b/docs/v3/manage/cloud/terraform-provider.mdx
similarity index 100%
rename from docs/3.0/manage/cloud/terraform-provider.mdx
rename to docs/v3/manage/cloud/terraform-provider.mdx
diff --git a/docs/3.0/manage/cloud/troubleshoot-cloud.mdx b/docs/v3/manage/cloud/troubleshoot-cloud.mdx
similarity index 96%
rename from docs/3.0/manage/cloud/troubleshoot-cloud.mdx
rename to docs/v3/manage/cloud/troubleshoot-cloud.mdx
index fd708cedab5b..e5c68ea8dcea 100644
--- a/docs/3.0/manage/cloud/troubleshoot-cloud.mdx
+++ b/docs/v3/manage/cloud/troubleshoot-cloud.mdx
@@ -134,7 +134,7 @@ They should be upgraded simultaneously with the core library, using the same met
## Logs
-In many cases, there is an informative stack trace in Prefect's [logs](/3.0/develop/logging/).
+In many cases, there is an informative stack trace in Prefect's [logs](/v3/develop/logging/).
**Read it carefully**, locate the source of the error, and try to identify the cause.
There are two types of logs:
@@ -174,13 +174,13 @@ curl -s -H "Authorization: Bearer $PREFECT_API_KEY" "https://api.prefect.cloud/a
**Users vs Service Accounts**
-[Service accounts](/3.0/manage/cloud/manage-users/service-accounts/) (sometimes referred to as bots),
+[Service accounts](/v3/manage/cloud/manage-users/service-accounts/) (sometimes referred to as bots),
represent non-human actors that interact with Prefect, such as workers and CI/CD systems.
Each human that interacts with Prefect should be represented as a user.
User API keys start with `pnu_` and service account API keys start with `pnb_`.
-Actors can be members of [workspaces](/3.0/manage/cloud/workspaces/). An actor attempting an action in a
+Actors can be members of [workspaces](/v3/manage/cloud/workspaces/). An actor attempting an action in a
workspace they are not a member of receives a 404 response. Use the following command to check your actor's workspace memberships:
```bash
@@ -195,14 +195,14 @@ Append the following to the end of the command above to make the output more rea
Make sure your actor is a member of the workspace you are working in. Within a workspace,
-an actor has a [role](/3.0/manage/cloud/manage-users/manage-roles/) which grants them certain permissions.
+an actor has a [role](/v3/manage/cloud/manage-users/manage-roles/) which grants them certain permissions.
Insufficient permissions result in an error. For example, starting a worker with the **Viewer** role results in errors.
## Execution
The user can execute flows locally, or remotely by a worker. Local execution generally means that you, the user,
run your flow directly with a command like `python flow.py`. Remote execution generally means that a worker runs your flow
-through a [deployment](/3.0/deploy/infrastructure-examples/docker/) (optionally on different infrastructure).
+through a [deployment](/v3/deploy/infrastructure-examples/docker/) (optionally on different infrastructure).
With remote execution, the creation of your flow run happens separately from its execution.
Flow runs are assigned to a work pool and a work queue. For flow runs to execute, a worker must be subscribed
diff --git a/docs/3.0/manage/cloud/workspaces.mdx b/docs/v3/manage/cloud/workspaces.mdx
similarity index 100%
rename from docs/3.0/manage/cloud/workspaces.mdx
rename to docs/v3/manage/cloud/workspaces.mdx
diff --git a/docs/v3/manage/index.mdx b/docs/v3/manage/index.mdx
new file mode 100644
index 000000000000..7e7556ae1b27
--- /dev/null
+++ b/docs/v3/manage/index.mdx
@@ -0,0 +1,13 @@
+---
+title: Cloud and server overview
+sidebarTitle: Overview
+description: Learn how to interact with Prefect Cloud or self-host a Prefect server instance.
+---
+
+The **Use Prefect Cloud** section covers how to interact with Prefect Cloud.
+See the [Prefect Cloud overview](/v3/manage/cloud/) for a discussion of the pages in this section.
+
+
+- [Host Prefect server](/v3/manage/self-host/) explains how to self-host Prefect server.
+- [Configure settings and profiles](/v3/develop/settings-and-profiles/) shows how to configure API interactions through environment variables or Prefect profiles.
+- [Manage run metadata in Python](/v3/manage/interact-with-api/) demonstrates how to interact with the API in Python through the `PrefectClient`.
\ No newline at end of file
diff --git a/docs/3.0/manage/interact-with-api.mdx b/docs/v3/manage/interact-with-api.mdx
similarity index 100%
rename from docs/3.0/manage/interact-with-api.mdx
rename to docs/v3/manage/interact-with-api.mdx
diff --git a/docs/3.0/manage/self-host.mdx b/docs/v3/manage/self-host.mdx
similarity index 93%
rename from docs/3.0/manage/self-host.mdx
rename to docs/v3/manage/self-host.mdx
index 3636fdd22207..03a4d60e988f 100644
--- a/docs/3.0/manage/self-host.mdx
+++ b/docs/v3/manage/self-host.mdx
@@ -8,54 +8,11 @@ To self-host a Prefect server instance on Kubernetes, check out the prefect-serv
After installing Prefect, you have a Python SDK client that can communicate with
-either [Prefect Cloud](/3.0/manage/cloud/) or a self-hosted Prefect server, backed by a database and a UI.
+either [Prefect Cloud](/v3/manage/cloud/) or a self-hosted Prefect server, backed by a database and a UI.
Prefect Cloud and self-hosted Prefect server share a common set of capabilities.
Prefect Cloud provides the additional features required by organizations such as RBAC, Audit logs, and SSO.
-See the [Prefect Cloud overview](/3.0/manage/cloud/) for more information.
-
-## Prefect server installation notes
-
-Your self-hosted server must meet the following requirements and configuration settings.
-
-### SQLite
-
-SQLite is not packaged with the Prefect installation. But most systems already have SQLite installed, and it is typically bundled with Python.
-
-If you self-host a Prefect server instance with a SQLite database, certain Linux versions of SQLite can be problematic.
-Compatible versions include Ubuntu 22.04 LTS and Ubuntu 20.04 LTS.
-
-To confirm SQLite is installed, run:
-
-```bash
-sqlite3 --version
-```
-
-### Use a self-signed SSL certificate
-
-When using a self-signed SSL certificate, you need to configure your environment to trust the certificate.
-Add the certificate to your system bundle and point your tools to use that bundle by configuring the
-`SSL_CERT_FILE` environment variable.
-
-If the certificate is not part of your system bundle, set the
-`PREFECT_API_TLS_INSECURE_SKIP_VERIFY` to `True` to disable certificate verification altogether.
-
-
-Disabling certificate validation is insecure and only suggested as an option for testing.
-
-
-### Proxies
-
-Prefect supports communicating with proxies through environment variables.
-Whether you use a Prefect Cloud account or self-host a Prefect server instance, set `HTTPS_PROXY` and
-`SSL_CERT_FILE` in your environment.
-Then the underlying network libraries will route Prefect's requests appropriately.
-
-Alternatively, the Prefect library connects to the API through any proxies you have listed in the `HTTP_PROXY` or
-`ALL_PROXY` environment variables.
-You may also use the `NO_PROXY` environment variable to specify which hosts should not pass through the proxy.
-
-For more information about these environment variables, see the [cURL documentation](https://everything.curl.dev/usingcurl/proxies/env).
+See the [Prefect Cloud overview](/v3/manage/cloud/) for more information.
## Self-host a Prefect server
@@ -67,13 +24,13 @@ prefect server start
2. Open the URL for the Prefect server UI ([http://127.0.0.1:4200](http://127.0.0.1:4200) by default) in a browser.
-![Viewing the dashboard in the Prefect UI.](/3.0/img/ui/self-hosted-server-dashboard.png)
+![Viewing the dashboard in the Prefect UI.](/v3/img/ui/self-hosted-server-dashboard.png)
3. Shut down the Prefect server with ctrl + c in the terminal.
### Configure self-hosted Prefect server
-Go to your terminal session and run this command to set the API URL to point to a sefl-hosted Prefect server instance:
+Go to your terminal session and run this command to set the API URL to point to a self-hosted Prefect server instance:
```bash
prefect config set PREFECT_API_URL="http://127.0.0.1:4200/api"
@@ -83,10 +40,9 @@ prefect config set PREFECT_API_URL="http://127.0.0.1:4200/api"
You must set the API server address, `PREFECT_API_URL`, to use Prefect within a container, such as a Docker container.
-You can save the API server address in a [Prefect profile](/3.0/manage/settings-and-profiles/). Whenever that profile is
-active, the API endpoint is at that address.
-
-See [Profiles and configuration](/3.0/manage/settings-and-profiles/) for more information on profiles and configurable Prefect settings.
+You can save the API server address in a Prefect profile.
+Whenever that profile is active, the API endpoint is at that address.
+See [Profiles and configuration](/v3/develop/settings-and-profiles/) for more information on profiles and configurable Prefect settings.
## The Prefect database
@@ -246,3 +202,47 @@ prefect server database downgrade -y -r d20618ce678e
To downgrade all migrations, use the `base` revision.
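For example, to downgrade all the way:

```bash
prefect server database downgrade -y -r base
```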
See the [contributing docs](/contribute/dev-contribute) to learn how to create a database migration.
+
+
+## Prefect server installation notes
+
+Your self-hosted server must meet the following requirements and configuration settings.
+
+### SQLite
+
+SQLite is not packaged with the Prefect installation, but most systems already have it installed since SQLite is typically bundled with Python.
+
+If you self-host a Prefect server instance with a SQLite database, note that the SQLite version bundled with certain Linux distributions can be problematic.
+Compatible distributions include Ubuntu 22.04 LTS and Ubuntu 20.04 LTS.
+
+To confirm SQLite is installed, run:
+
+```bash
+sqlite3 --version
+```
+
+### Use a self-signed SSL certificate
+
+When using a self-signed SSL certificate, you need to configure your environment to trust the certificate.
+Add the certificate to your system bundle and point your tools to use that bundle by configuring the
+`SSL_CERT_FILE` environment variable.
+
+If the certificate is not part of your system bundle, set the
+`PREFECT_API_TLS_INSECURE_SKIP_VERIFY` setting to `True` to disable certificate verification altogether.
+
+
+Disabling certificate validation is insecure and only suggested as an option for testing.
+
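+A hedged sketch of both approaches (the bundle path is a placeholder; prefer the first):
+
+```bash
+# Trust a bundle that includes your self-signed certificate
+export SSL_CERT_FILE="/path/to/ca-bundle.pem"
+
+# Testing only: disable certificate verification (insecure)
+prefect config set PREFECT_API_TLS_INSECURE_SKIP_VERIFY=True
+```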
+
+### Proxies
+
+Prefect supports communicating with proxies through environment variables.
+Whether you use a Prefect Cloud account or self-host a Prefect server instance, set `HTTPS_PROXY` and
+`SSL_CERT_FILE` in your environment.
+Then the underlying network libraries will route Prefect's requests appropriately.
+
+Alternatively, the Prefect library connects to the API through any proxies you have listed in the `HTTP_PROXY` or
+`ALL_PROXY` environment variables.
+You may also use the `NO_PROXY` environment variable to specify which hosts should not pass through the proxy.
+
+For more information about these environment variables, see the [cURL documentation](https://everything.curl.dev/usingcurl/proxies/env).
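+As an illustrative sketch (the proxy address and bundle path are placeholders):
+
+```bash
+export HTTPS_PROXY="http://proxy.example.com:3128"
+export SSL_CERT_FILE="/path/to/ca-bundle.pem"
+export NO_PROXY="localhost,127.0.0.1"
+```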
diff --git a/docs/3.0/resources/big-data.mdx b/docs/v3/resources/big-data.mdx
similarity index 94%
rename from docs/3.0/resources/big-data.mdx
rename to docs/v3/resources/big-data.mdx
index 81dfcedc8a08..72f5ea4cc857 100644
--- a/docs/3.0/resources/big-data.mdx
+++ b/docs/v3/resources/big-data.mdx
@@ -13,7 +13,7 @@ storage, including:
1. Saving data to disk within a flow rather than using results.
1. Caching task results to save time and compute.
1. Compressing results written to disk to save space.
-1. Using a [task runner](/3.0/develop/task-runners/) for parallelizable
+1. Using a [task runner](/v3/develop/task-runners/) for parallelizable
operations to save time.
### Remove task introspection
@@ -73,7 +73,7 @@ Save memory by writing results to disk.
In production, it's recommended to write results to cloud provider storage such as AWS S3.
Prefect lets you use a storage block from a Prefect Cloud integration library such as
[prefect-aws](/integrations/prefect-aws) to save your configuration information.
-Learn more about [blocks](/3.0/develop/blocks/).
+Learn more about [blocks](/v3/develop/blocks/).
Install the relevant library, register the block type with the server, and create your block.
Then reference the block in your flow:
@@ -111,7 +111,7 @@ from cloud object storage.
Caching saves you time and compute by allowing you to avoid re-running tasks unnecessarily.
Note that caching requires task result persistence.
-Learn more about [caching](/3.0/develop/write-tasks/).
+Learn more about [caching](/v3/develop/write-tasks/).
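+A minimal caching sketch (the expiration value is arbitrary):
+
+```python
+from datetime import timedelta
+
+from prefect import task
+from prefect.tasks import task_input_hash
+
+@task(cache_key_fn=task_input_hash, cache_expiration=timedelta(hours=1), persist_result=True)
+def expensive(x: int) -> int:
+    # re-runs with the same input within the hour reuse the persisted result
+    return x ** 2
+```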
### Compress results written to disk
@@ -129,5 +129,5 @@ Note that compression takes time to compress and decompress the data.
Prefect's task runners allow you to use the Dask and Ray Python libraries to run tasks in parallel,
distributed across multiple machines.
This can save you time and compute when operating on large data structures.
-See the [guide to working with Dask and Ray Task Runners](/3.0/develop/task-runners/)
+See the [guide to working with Dask and Ray Task Runners](/v3/develop/task-runners/)
for details.
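A minimal sketch with the Dask task runner (assumes the `prefect-dask` integration is installed):

```python
from prefect import flow, task
from prefect_dask import DaskTaskRunner

@task
def double(x: int) -> int:
    return x * 2

@flow(task_runner=DaskTaskRunner())
def parallel_flow():
    # .map submits one task run per element; Dask runs them in parallel
    return double.map(range(10))
```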
diff --git a/docs/3.0/resources/cancel.mdx b/docs/v3/resources/cancel.mdx
similarity index 96%
rename from docs/3.0/resources/cancel.mdx
rename to docs/v3/resources/cancel.mdx
index 3991e36eeba5..4a94222542a4 100644
--- a/docs/3.0/resources/cancel.mdx
+++ b/docs/v3/resources/cancel.mdx
@@ -28,7 +28,7 @@ A monitoring process must be running to enforce the cancellation.
Inline nested flow runs (those created without `run_deployment`) cannot be cancelled without cancelling the parent flow run.
To cancel a nested flow run independently of its parent flow run, we recommend deploying it separately
-and starting it using the [run_deployment](/3.0/deploy/index)
+and starting it using the [run_deployment](/v3/deploy/index)
function.
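A hedged sketch of that pattern (the deployment name is a placeholder):

```python
from prefect import flow
from prefect.deployments import run_deployment

@flow
def parent():
    # the child runs as its own flow run and can be cancelled independently
    run_deployment(name="child-flow/child-deployment")
```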
@@ -80,7 +80,7 @@ prefect flow-run cancel 'a55a4804-9e3c-4042-8b59-b3b6b7618736'
Navigate to the flow run's detail page and click `Cancel` in the upper right corner.
-![Prefect UI](/3.0/img/ui/flow-run-cancellation-ui.png)
+![Prefect UI](/v3/img/ui/flow-run-cancellation-ui.png)
## Timeouts
diff --git a/docs/3.0/resources/cli-shell.mdx b/docs/v3/resources/cli-shell.mdx
similarity index 90%
rename from docs/3.0/resources/cli-shell.mdx
rename to docs/v3/resources/cli-shell.mdx
index cab01692b80c..acc3165bc8aa 100644
--- a/docs/3.0/resources/cli-shell.mdx
+++ b/docs/v3/resources/cli-shell.mdx
@@ -12,10 +12,10 @@ and schedule shell commands as Prefect flows, including how to:
## Prerequisites
Before you begin, ensure you have:
-- A basic understanding of Prefect flows. Start with the [Getting started](/3.0/get-started/quickstart/)
+- A basic understanding of Prefect flows. Start with the [Getting started](/v3/get-started/quickstart/)
guide if necessary.
- A recent version of Prefect installed in your command line environment.
-Follow these [instructions](/3.0/get-started/install/) if you have any issues.
+Follow these [instructions](/v3/get-started/install/) if you have any issues.
## The `watch` command
The `watch` command wraps any shell command in a Prefect flow for instant execution.
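As a quick sketch (assuming the `prefect shell watch` CLI entrypoint; the wrapped command is arbitrary):

```bash
prefect shell watch "curl -s https://api.github.com"
```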
@@ -40,7 +40,7 @@ shell tasks.
## Deploy with `serve`
To run shell commands on a schedule, the `serve` command creates a Prefect
-[deployment](/3.0/deploy/infrastructure-examples/docker/) for regular execution.
+[deployment](/v3/deploy/infrastructure-examples/docker/) for regular execution.
This is a quick way to create a deployment served by Prefect.
### Example usage
@@ -62,5 +62,5 @@ ensuring critical updates are generated and available on time.
- **Centralized workflow management:** Manage and monitor your scheduled shell commands
inside Prefect for a unified workflow overview.
- **Configurable execution:** Customize execution frequency,
-[concurrency limits](/3.0/develop/global-concurrency-limits/),
+[concurrency limits](/v3/develop/global-concurrency-limits/),
and other parameters to suit your project's needs and resources.
diff --git a/docs/3.0/resources/daemonize-processes.mdx b/docs/v3/resources/daemonize-processes.mdx
similarity index 97%
rename from docs/3.0/resources/daemonize-processes.mdx
rename to docs/v3/resources/daemonize-processes.mdx
index 5d2d064e8505..e3eb4978d3c3 100644
--- a/docs/3.0/resources/daemonize-processes.mdx
+++ b/docs/v3/resources/daemonize-processes.mdx
@@ -27,8 +27,8 @@ AWS Linux image, you can install Python and pip with `sudo yum install -y python
A systemd service is ideal for running a long-lived process on a Linux VM or physical Linux server.
You will use systemd and learn how to automatically start a
-[Prefect worker](/3.0/deploy/infrastructure-concepts/workers/) or
-long-lived [`serve` process](/3.0/develop/write-flows/#serving-a-flow) when Linux starts.
+[Prefect worker](/v3/deploy/infrastructure-concepts/workers/) or
+long-lived [`serve` process](/v3/develop/write-flows/#serving-a-flow) when Linux starts.
This approach provides resilience by automatically restarting the process if it crashes.
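As an illustrative sketch (the unit name `my-prefect-worker.service` is hypothetical; the steps below cover the real setup):

```bash
# enable at boot and start now; systemd restarts the process on crash
sudo systemctl enable --now my-prefect-worker.service
```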
### Step 1: Add a user
diff --git a/docs/3.0/resources/recipes.mdx b/docs/v3/resources/recipes.mdx
similarity index 100%
rename from docs/3.0/resources/recipes.mdx
rename to docs/v3/resources/recipes.mdx
diff --git a/docs/3.0/resources/secrets.mdx b/docs/v3/resources/secrets.mdx
similarity index 97%
rename from docs/3.0/resources/secrets.mdx
rename to docs/v3/resources/secrets.mdx
index 1cdaf91dd418..1e8d60fa5d70 100644
--- a/docs/3.0/resources/secrets.mdx
+++ b/docs/v3/resources/secrets.mdx
@@ -15,7 +15,7 @@ credentials.
## Prerequisites
-1. Prefect [installed](/3.0/get-started/install)
+1. Prefect [installed](/v3/get-started/install)
1. CLI authenticated to your [Prefect Cloud](https://app.prefect.cloud) account
1. [Snowflake account](https://www.snowflake.com/)
1. [AWS account](https://aws.amazon.com/)
@@ -178,7 +178,7 @@ See [`prefect-snowflake`](/integrations/prefect-snowflake) for more examples of
## Next steps
-Now you can turn your flow into a [deployment](/3.0/deploy/infrastructure-examples/docker/) so you and your team
+Now you can turn your flow into a [deployment](/v3/deploy/infrastructure-examples/docker/) so you and your team
can run it remotely on a schedule, in response to an event, or manually.
Make sure to specify the `prefect-aws` and `prefect-snowflake` dependencies in your work pool or deployment
diff --git a/docs/3.0/resources/upgrade-agents-to-workers.mdx b/docs/v3/resources/upgrade-agents-to-workers.mdx
similarity index 81%
rename from docs/3.0/resources/upgrade-agents-to-workers.mdx
rename to docs/v3/resources/upgrade-agents-to-workers.mdx
index 4ad1d5738595..391bed8beea9 100644
--- a/docs/3.0/resources/upgrade-agents-to-workers.mdx
+++ b/docs/v3/resources/upgrade-agents-to-workers.mdx
@@ -9,11 +9,11 @@ by simplifying the specification of each flow's infrastructure and runtime envir
This guide is for users who are on `prefect<3.0` who are upgrading from agents to workers.
If you are new to Prefect, we recommend starting with the
-[Prefect Quickstart](/3.0/get-started/quickstart/).
+[Prefect Quickstart](/v3/get-started/quickstart/).
## About workers and agents
-A [worker](/3.0/deploy/infrastructure-concepts/workers/) is the fusion of an
+A [worker](/v3/deploy/infrastructure-concepts/workers/) is the fusion of an
agent with an infrastructure block.
Like agents, workers poll a work pool for flow runs that are scheduled to start.
Like infrastructure blocks, workers are typed. They work with only one kind of infrastructure,
@@ -22,7 +22,7 @@ and they specify the default configuration for jobs submitted to that infrastruc
Accordingly, workers are not a drop-in replacement for agents. **Using workers requires
deploying flows differently.** In particular, deploying a flow with a worker does not involve
specifying an infrastructure block. Instead, infrastructure configuration is specified on the
-[work pool](/3.0/deploy/infrastructure-concepts/work-pools/) and passed to each worker that polls work
+[work pool](/v3/deploy/infrastructure-concepts/work-pools/) and passed to each worker that polls work
from that pool.
## Upgrade enhancements
@@ -36,47 +36,47 @@ and when it last polled.
### Work pools
- Work pools allow greater customization and governance of infrastructure parameters for deployments
-through their [base job template](/3.0/deploy/infrastructure-concepts/work-pools/#base-job-template).
-- Prefect Cloud [push work pools](/3.0/deploy/infrastructure-examples/serverless/) enable flow
+through their [base job template](/v3/deploy/infrastructure-concepts/work-pools/#base-job-template).
+- Prefect Cloud [push work pools](/v3/deploy/infrastructure-examples/serverless/) enable flow
execution in your cloud provider environment without the need to host a worker.
-- Prefect Cloud [managed work pools](/3.0/deploy/infrastructure-examples/managed/) allow you to run flows on
+- Prefect Cloud [managed work pools](/v3/deploy/infrastructure-examples/managed/) allow you to run flows on
Prefect's infrastructure, without the need to host a worker or configure cloud provider infrastructure.
### Improved deployment interfaces
- The Python deployment experience with `.deploy()` or the alternative deployment experience with
`prefect.yaml` are more flexible and easier to use than block and agent-based deployments.
-- Both options allow you to [deploy multiple flows](/3.0/deploy/infrastructure-concepts/prefect-yaml/#work-with-multiple-deployments-with-prefect-yaml)
+- Both options allow you to [deploy multiple flows](/v3/deploy/infrastructure-concepts/prefect-yaml/#work-with-multiple-deployments-with-prefect-yaml)
with a single command.
- Both options allow you to build Docker images for your flows to create portable execution environments.
-- The YAML-based API supports [templating](/3.0/deploy/infrastructure-concepts/prefect-yaml/#templating-options)
-to enable [dryer deployment definitions](/3.0/deploy/infrastructure-examples/docker/#reusing-configuration-across-deployments).
+- The YAML-based API supports [templating](/v3/deploy/infrastructure-concepts/prefect-yaml/#templating-options)
+to enable [drier deployment definitions](/v3/deploy/infrastructure-examples/docker/#reusing-configuration-across-deployments).
## Upgrade changes
1. **Deployment CLI and Python SDK:**
- `prefect deployment build `/`prefect deployment apply` --> [`prefect deploy`](/3.0/deploy/infrastructure-concepts/prefect-yaml/#deployment-declaration-reference)
+ `prefect deployment build `/`prefect deployment apply` --> [`prefect deploy`](/v3/deploy/infrastructure-concepts/prefect-yaml/#deployment-declaration-reference)
- Prefect now automatically detects flows in your repo and provides a [wizard](/3.0/#step-5-deploy-the-flow)
+ Prefect now automatically detects flows in your repo and provides a [wizard](/v3/#step-5-deploy-the-flow)
to guide you through setting required attributes for your deployments.
`Deployment.build_from_flow` --> [`flow.deploy`](https://prefect-python-sdk-docs.netlify.app/prefect/flows/#prefect.flows.Flow.deploy)
2. **Configuring remote flow code storage:**
- storage blocks --> [pull action](/3.0/deploy/infrastructure-concepts/prefect-yaml/#the-pull-action)
+ storage blocks --> [pull action](/v3/deploy/infrastructure-concepts/prefect-yaml/#the-pull-action)
When using the YAML-based deployment API, you can configure a pull action in your `prefect.yaml`
file to specify how to retrieve flow code for your deployments. You can use configuration from your
- existing storage blocks to define your pull action [through templating](/3.0/deploy/infrastructure-concepts/prefect-yaml/#templating-options).
+ existing storage blocks to define your pull action [through templating](/v3/deploy/infrastructure-concepts/prefect-yaml/#templating-options).
When using the Python deployment API, you can pass any storage block to the `flow.deploy` method to
specify how to retrieve flow code for your deployment.
3. **Configuring flow run infrastructure:**
- infrastructure blocks --> [typed work pool](/3.0/deploy/infrastructure-concepts/workers/#worker-types)
+ infrastructure blocks --> [typed work pool](/v3/deploy/infrastructure-concepts/workers/#worker-types)
Default infrastructure config is now set on the typed work pool, and can be overwritten by
individual deployments.
@@ -84,8 +84,8 @@ to enable [dryer deployment definitions](/3.0/deploy/infrastructure-examples/doc
4. **Managing multiple deployments:**
Create and/or update many deployments at once through a
- [`prefect.yaml`](/3.0/deploy/infrastructure-concepts/prefect-yaml/#work-with-multiple-deployments-with-prefect-yaml)
- file or use the [`deploy`](/3.0/deploy/infrastructure-examples/docker)
+ [`prefect.yaml`](/v3/deploy/infrastructure-concepts/prefect-yaml/#work-with-multiple-deployments-with-prefect-yaml)
+ file or use the [`deploy`](/v3/deploy/infrastructure-examples/docker)
function.
## What's similar
@@ -94,9 +94,9 @@ to enable [dryer deployment definitions](/3.0/deploy/infrastructure-examples/doc
- Infrastructure blocks have configuration fields similar to typed work pools.
- Deployment-level infrastructure overrides operate in much the same way.
- `infra_override` -> [`job_variable`](/3.0/deploy/infrastructure-concepts/prefect-yaml/#work-pool-fields)
+ `infra_override` -> [`job_variable`](/v3/deploy/infrastructure-concepts/prefect-yaml/#work-pool-fields)
-- The process for starting an agent and [starting a worker](/3.0/deploy/infrastructure-concepts/workers/#start-a-worker)
+- The processes for starting an agent and [starting a worker](/v3/deploy/infrastructure-concepts/workers/#start-a-worker)
in your environment are virtually identical.
`prefect agent start --pool <POOL-NAME>` --> `prefect worker start --pool <POOL-NAME>`
@@ -112,7 +112,7 @@ to host workers in your cluster.
If you have existing deployments that use infrastructure blocks, you can quickly upgrade them to
be compatible with workers by following these steps:
-1. **[Create a work pool](/3.0/deploy/infrastructure-concepts/work-pools/#work-pool-configuration)**
+1. **[Create a work pool](/v3/deploy/infrastructure-concepts/work-pools/#work-pool-configuration)**
This new work pool replaces your infrastructure block.
@@ -143,7 +143,7 @@ Running this script creates a work pool named 'my-k8s-job' with the same configu
**Serving flows**
If you are using a `Process` infrastructure block and a `LocalFilesystem` storage block
-(or aren't using an infrastructure and storage block at all), you can use [`flow.serve`](/3.0/deploy/index)
+(or aren't using an infrastructure and storage block at all), you can use [`flow.serve`](/v3/deploy/index)
to create a deployment without specifying a work pool name or starting a worker.
This is a quick way to create a deployment for a flow and manage your
@@ -151,7 +151,7 @@ deployments if you don't need the dynamic infrastructure creation or configurati
by workers.
-2. **[Start a worker](/3.0/deploy/infrastructure-concepts/workers/#start-a-worker)**
+2. **[Start a worker](/v3/deploy/infrastructure-concepts/workers/#start-a-worker)**
This worker replaces your agent and polls your new work pool for flow runs to execute.
@@ -181,7 +181,7 @@ but here are some possible changes you may need:
- If you've used the `.publish_as_work_pool` method on your infrastructure block, use the
name of the created work pool.
- Replace `infra_overrides` with `job_variables`.
-- Replace `storage` with a call to [`flow.from_source`](/3.0/deploy/index).
+- Replace `storage` with a call to [`flow.from_source`](/v3/deploy/index).
- `flow.from_source` loads your flow from a remote storage location and makes it deployable.
You can pass your existing storage block to the `source` argument of `flow.from_source`.
@@ -239,7 +239,7 @@ prefect worker start --pool local
```
-If you'd like to immediately serve this flow as a deployment without running a worker or using work pools, you can [use `flow.serve`](/3.0/deploy/run-flows-in-local-processes/).
+If you'd like to immediately serve this flow as a deployment without running a worker or using work pools, you can [use `flow.serve`](/v3/deploy/run-flows-in-local-processes/).
#### Deploying using a storage block
@@ -248,7 +248,7 @@ If you currently use a storage block to load your flow code but no infrastructur
```python
from prefect import flow
-from prefect.storage import GitHub
+from prefect.filesystems import GitHub
@flow(log_prints=True)
@@ -270,6 +270,7 @@ create a deployment:
```python example.py
from prefect import flow
+from prefect.blocks.system import Secret
from prefect.runner.storage import GitRepository
@flow(log_prints=True)
@@ -280,7 +281,7 @@ if __name__ == "__main__":
flow.from_source(
source=GitRepository(
url="https://github.com/me/myrepo.git",
- credentials={"username": "oauth2", "access_token": "my-access-token"},
+ credentials={"username": "oauth2", "access_token": Secret.load("my-github-pat")},
),
entrypoint="example.py:my_flow"
).deploy(
@@ -299,7 +300,7 @@ For the code below, you need to create a work pool from your infrastructure bloc
```python example.py
from prefect import flow
from prefect.deployments import Deployment
-from prefect.filesystems import GitHub
+from prefect.filesystems import GitHub # this block class no longer exists
from prefect.infrastructure.kubernetes import KubernetesJob
@@ -307,12 +308,13 @@ from prefect.infrastructure.kubernetes import KubernetesJob
def my_flow(name: str = "world"):
print(f"Hello {name}! I'm a flow from a GitHub repo!")
+repo = GitHub.load("demo-repo")
if __name__ == "__main__":
Deployment.build_from_flow(
my_flow,
name="my-deployment",
- storage=GitHub.load("demo-repo"),
+ storage=repo,
entrypoint="example.py:my_flow",
infrastructure=KubernetesJob.load("my-k8s-job"),
infra_overrides=dict(pull_policy="Never"),
@@ -324,11 +326,10 @@ The equivalent deployment code using `flow.deploy` should look like this:
```python example.py
from prefect import flow
-from prefect.storage import GitHub
if __name__ == "__main__":
flow.from_source(
- source=GitHub.load("demo-repo"),
+ source="https://github.com/me/myrepo.git",
entrypoint="example.py:my_flow"
).deploy(
name="my-deployment",
@@ -384,7 +385,7 @@ flow is executed.
With agents, you may have multiple `deployment.yaml` files. But under worker deployment
patterns, each repo has a single `prefect.yaml` file located at the **root** of the repo
-that contains [deployment configuration](/3.0/deploy/infrastructure-concepts/prefect-yaml/#work-with-multiple-deployments-with-prefect-yaml)
+that contains [deployment configuration](/v3/deploy/infrastructure-concepts/prefect-yaml/#work-with-multiple-deployments-with-prefect-yaml)
for all flows in that repo.
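A minimal sketch of such a file with two deployments might look like this (the names, entrypoints, and work pool are placeholders):
```yaml
deployments:
  - name: deployment-1
    entrypoint: flows/hello.py:my_flow
    work_pool:
      name: my-work-pool
  - name: deployment-2
    entrypoint: flows/goodbye.py:my_other_flow
    work_pool:
      name: my-work-pool
```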
@@ -401,12 +402,12 @@ This starts a wizard that guides you through setting up your deployment.
**For step 4, select `y` on the last prompt to save the configuration for the deployment.**
Saving the configuration for your deployment results in a `prefect.yaml` file populated
-with your first deployment. You can use this YAML file to edit and [define multiple deployments](/3.0/deploy/infrastructure-concepts/prefect-yaml/#work-with-multiple-deployments-with-prefect-yaml)
+with your first deployment. You can use this YAML file to edit and [define multiple deployments](/v3/deploy/infrastructure-concepts/prefect-yaml/#work-with-multiple-deployments-with-prefect-yaml)
for this repo.
-You can add more [deployments](/3.0/deploy/infrastructure-concepts/prefect-yaml/#deployment-declaration-reference)
+You can add more [deployments](/v3/deploy/infrastructure-concepts/prefect-yaml/#deployment-declaration-reference)
to the `deployments` list in your `prefect.yaml` file and/or by continuing to use the deployment
creation wizard.
-For more information on deployments, check out our [in-depth guide for deploying flows to work pools](/3.0/deploy/infrastructure-examples/docker/).
\ No newline at end of file
+For more information on deployments, check out our [in-depth guide for deploying flows to work pools](/v3/deploy/infrastructure-examples/docker/).
\ No newline at end of file
diff --git a/docs/3.0/resources/upgrade-to-prefect-3.mdx b/docs/v3/resources/upgrade-to-prefect-3.mdx
similarity index 98%
rename from docs/3.0/resources/upgrade-to-prefect-3.mdx
rename to docs/v3/resources/upgrade-to-prefect-3.mdx
index 9db6d2b92d87..375a7c09ea7e 100644
--- a/docs/3.0/resources/upgrade-to-prefect-3.mdx
+++ b/docs/v3/resources/upgrade-to-prefect-3.mdx
@@ -7,13 +7,13 @@ Prefect 3.0 introduces a number of enhancements to the OSS product: a new events
The majority of these enhancements maintain compatibility with most Prefect 2.0 workflows, but there are a few caveats that you may need to adjust for.
-To learn more about the enhanced performance and new features, see [What's new in Prefect 3.0](/3.0/resources/whats-new-prefect-3).
+To learn more about the enhanced performance and new features, see [What's new in Prefect 3.0](/v3/resources/whats-new-prefect-3).
For the majority of users, upgrading to Prefect 3.0 will be a seamless process that requires few or no code changes.
This guide highlights key changes that you may need to consider when upgrading.
-**Prefect 2.0** refers to the 2.x lineage of the open source prefect package, and **Prefect 3.0** refers exclusively to the 3.x lineage of the prefect package. Neither version is strictly tied to any aspect of Prefect's commercial product, [Prefect Cloud](/3.0/manage/cloud).
+**Prefect 2.0** refers to the 2.x lineage of the open source prefect package, and **Prefect 3.0** refers exclusively to the 3.x lineage of the prefect package. Neither version is strictly tied to any aspect of Prefect's commercial product, [Prefect Cloud](/v3/manage/cloud).
## Quickstart
@@ -177,7 +177,7 @@ Prefect 3.0 introduces a powerful idempotency engine. By default, tasks in a flo
This change affects you if: you're using agents from an early version of Prefect 2.0.
-In Prefect 2.0, agents were deprecated in favor of next-generation workers. Workers are now standard in Prefect 3. For detailed information on upgrading from agents to workers, please refer to our [upgrade guide](https://docs-3.prefect.io/3.0/resources/upgrade-agents-to-workers).
+In Prefect 2.0, agents were deprecated in favor of next-generation workers. Workers are now standard in Prefect 3. For detailed information on upgrading from agents to workers, please refer to our [upgrade guide](https://docs-3.prefect.io/v3/resources/upgrade-agents-to-workers).
### Resolving common gotchas
diff --git a/docs/3.0/resources/visualize-flow-structure.mdx b/docs/v3/resources/visualize-flow-structure.mdx
similarity index 92%
rename from docs/3.0/resources/visualize-flow-structure.mdx
rename to docs/v3/resources/visualize-flow-structure.mdx
index 06a06a4ee321..2b444f07f92c 100644
--- a/docs/3.0/resources/visualize-flow-structure.mdx
+++ b/docs/v3/resources/visualize-flow-structure.mdx
@@ -45,7 +45,7 @@ if __name__ == "__main__":
hello_world.visualize()
```
-![A simple flow visualized with the .visualize() method](/3.0/img/orchestration/hello-flow-viz.png)
+![A simple flow visualized with the .visualize() method](/v3/img/orchestration/hello-flow-viz.png)
Prefect cannot automatically produce a schematic for dynamic workflows, such as those with loops or
if/else control flow.
@@ -72,4 +72,4 @@ if __name__ == "__main__":
viz_return_value_tracked.visualize()
```
-![A flow with return values visualized with the .visualize() method](/3.0/img/orchestration/viz-return-value-tracked.png)
\ No newline at end of file
+![A flow with return values visualized with the .visualize() method](/v3/img/orchestration/viz-return-value-tracked.png)
\ No newline at end of file
diff --git a/docs/3.0/resources/whats-new-prefect-3.mdx b/docs/v3/resources/whats-new-prefect-3.mdx
similarity index 97%
rename from docs/3.0/resources/whats-new-prefect-3.mdx
rename to docs/v3/resources/whats-new-prefect-3.mdx
index 1e4c90d0d02f..7f73a289165f 100644
--- a/docs/3.0/resources/whats-new-prefect-3.mdx
+++ b/docs/v3/resources/whats-new-prefect-3.mdx
@@ -5,10 +5,10 @@ sidebarTitle: What's new
Prefect 3.0 introduces a number of enhancements to the OSS product: a new events & automations backend for event-driven workflows and observability, improved runtime performance, autonomous task execution and a streamlined caching layer based on transactional semantics.
-Most Prefect 2.0 users can upgrade without changes to their existing workflows. Please review the [upgrade guide](/3.0/resources/upgrade-to-prefect-3) for more information.
+Most Prefect 2.0 users can upgrade without changes to their existing workflows. Please review the [upgrade guide](/v3/resources/upgrade-to-prefect-3) for more information.
-**Prefect 2.0** refers to the 2.x lineage of the open source prefect package, and **Prefect 3.0** refers exclusively to the 3.x lineage of the prefect package. Neither version is strictly tied to any aspect of Prefect's commercial product, [Prefect Cloud](/3.0/manage/cloud).
+**Prefect 2.0** refers to the 2.x lineage of the open source prefect package, and **Prefect 3.0** refers exclusively to the 3.x lineage of the prefect package. Neither version is strictly tied to any aspect of Prefect's commercial product, [Prefect Cloud](/v3/manage/cloud).
## Open source events and automation system
diff --git a/docs/v3/tutorials/pipelines.mdx b/docs/v3/tutorials/pipelines.mdx
new file mode 100644
index 000000000000..65ef4a58eb24
--- /dev/null
+++ b/docs/v3/tutorials/pipelines.mdx
@@ -0,0 +1,421 @@
+---
+title: Build a data pipeline
+description: Learn how to build resilient and performant data pipelines with Prefect.
+---
+
+In the [Quickstart](/v3/get-started/quickstart), you created a Prefect flow to get stars for a list of GitHub repositories.
+And in [Schedule a flow](/v3/tutorials/schedule), you learned how to schedule runs of that flow on remote infrastructure.
+
+In this tutorial, you'll learn how to turn this flow into a resilient and performant data pipeline.
+The real world is messy, and Prefect is designed to handle that messiness.
+
+- Your API requests can fail.
+- Your API requests run too slowly.
+- Your API requests run too quickly and you get rate limited.
+- You waste time and money running the same tasks multiple times.
+
+Instead of solving these problems in the business logic itself, use Prefect's built-in features to handle them.
+
+## Retry on failure
+
+The first improvement you can make is to add retries to your flow.
+Whenever an HTTP request fails, you can retry it a few times before giving up.
+
+```python
+import httpx
+
+from prefect import task
+
+@task(retries=3)
+def fetch_stats(github_repo: str):
+ """Task 1: Fetch the statistics for a GitHub repo"""
+
+ api_response = httpx.get(f"https://api.github.com/repos/{github_repo}")
+ api_response.raise_for_status() # Force a retry if you don't get a 2xx status code
+ return api_response.json()
+```
+
+
+Run the following code to see retries in action:
+
+```python
+import httpx
+
+from prefect import flow, task # Prefect flow and task decorators
+
+
+@flow(log_prints=True)
+def show_stars(github_repos: list[str]):
+ """Flow: Show the number of stars that GitHub repos have"""
+
+ for repo in github_repos:
+ # Call Task 1
+ repo_stats = fetch_stats(repo)
+
+ # Call Task 2
+ stars = get_stars(repo_stats)
+
+ # Print the result
+ print(f"{repo}: {stars} stars")
+
+
+@task(retries=3)
+def fetch_stats(github_repo: str):
+ """Task 1: Fetch the statistics for a GitHub repo"""
+
+ api_response = httpx.get(f"https://api.github.com/repos/{github_repo}")
+ api_response.raise_for_status() # Force a retry if you don't get a 2xx status code
+ return api_response.json()
+
+
+@task
+def get_stars(repo_stats: dict):
+ """Task 2: Get the number of stars from GitHub repo statistics"""
+
+ return repo_stats['stargazers_count']
+
+
+# Run the flow
+if __name__ == "__main__":
+ show_stars([
+ "PrefectHQ/prefect",
+ "pydantic/pydantic",
+ "huggingface/transformers"
+ ])
+```
+
+
+## Concurrent execution of slow tasks
+
+If individual API requests are slow, you can speed them up in aggregate by making multiple requests concurrently.
+When you call the `submit` method on a task, the task is submitted to a task runner for execution.
+
+```python
+from prefect import flow
+
+@flow(log_prints=True)
+def show_stars(github_repos: list[str]):
+ """Flow: Show the number of stars that GitHub repos have"""
+
+ # Task 1: Make HTTP requests concurrently
+ repo_stats = []
+ for repo in github_repos:
+ repo_stats.append({
+ 'repo': repo,
+ 'task': fetch_stats.submit(repo) # Submit each task to a task runner
+ })
+
+ # Task 2: Once each concurrent task completes, show the results
+ for repo in repo_stats:
+ repo_name = repo['repo']
+ stars = get_stars(repo['task'].result()) # Block until the task has completed
+ print(f"{repo_name}: {stars} stars")
+```
+
+
+Run the following code to see concurrent tasks in action:
+
+```python
+import httpx
+
+from prefect import flow, task # Prefect flow and task decorators
+
+
+@flow(log_prints=True)
+def show_stars(github_repos: list[str]):
+ """Flow: Show the number of stars that GitHub repos have"""
+
+ # Task 1: Make HTTP requests concurrently
+ repo_stats = []
+ for repo in github_repos:
+ repo_stats.append({
+ 'repo': repo,
+ 'task': fetch_stats.submit(repo) # Submit each task to a task runner
+ })
+
+ # Task 2: Once each concurrent task completes, show the results
+ for repo in repo_stats:
+ repo_name = repo['repo']
+ stars = get_stars(repo['task'].result()) # Block until the task has completed
+ print(f"{repo_name}: {stars} stars")
+
+
+@task
+def fetch_stats(github_repo: str):
+ """Task 1: Fetch the statistics for a GitHub repo"""
+
+ return httpx.get(f"https://api.github.com/repos/{github_repo}").json()
+
+
+@task
+def get_stars(repo_stats: dict):
+ """Task 2: Get the number of stars from GitHub repo statistics"""
+
+ return repo_stats['stargazers_count']
+
+
+# Run the flow
+if __name__ == "__main__":
+ show_stars([
+ "PrefectHQ/prefect",
+ "pydantic/pydantic",
+ "huggingface/transformers"
+ ])
+```
+
+
+## Avoid getting rate limited
+
+One consequence of running tasks concurrently is that you're more likely to hit the rate limits of whatever API you're using.
+To avoid this, use Prefect to set a global concurrency limit.
+
+```bash
+# GitHub has a rate limit of 60 unauthenticated requests per hour (~0.016 requests per second)
+prefect gcl create github-api --limit 60 --slot-decay-per-second 0.016
+```
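+
+The decay rate follows from the limit itself: 60 requests per hour spread over 3,600 seconds is 60 / 3600 ≈ 0.016 slots replenished per second, which is where the value passed to `--slot-decay-per-second` comes from.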
+
+Now, you can use this global concurrency limit in your code:
+
+```python
+from prefect import flow
+from prefect.concurrency.sync import rate_limit
+
+@flow(log_prints=True)
+def show_stars(github_repos: list[str]):
+ """Flow: Show the number of stars that GitHub repos have"""
+
+ repo_stats = []
+ for repo in github_repos:
+ # Apply the concurrency limit to this loop
+ rate_limit("github-api")
+
+ # Call Task 1
+ repo_stats.append({
+ 'repo': repo,
+ 'task': fetch_stats.submit(repo)
+ })
+
+ # ...
+```
+
+
+Run the following code to see concurrency limits in action:
+
+```python
+import httpx
+
+from prefect import flow, task # Prefect flow and task decorators
+from prefect.concurrency.sync import rate_limit
+
+
+@flow(log_prints=True)
+def show_stars(github_repos: list[str]):
+ """Flow: Show the number of stars that GitHub repos have"""
+
+    repo_stats = []
+    for repo in github_repos:
+        # Apply the concurrency limit to this loop
+        rate_limit("github-api")
+
+        # Call Task 1
+        repo_stats.append({
+            'repo': repo,
+            'task': fetch_stats.submit(repo)  # Submit each task to a task runner
+        })
+
+    # Call Task 2: once each concurrent task completes, show the results
+    for repo in repo_stats:
+        repo_name = repo['repo']
+        stars = get_stars(repo['task'].result())  # Block until the task has completed
+        print(f"{repo_name}: {stars} stars")
+
+
+@task
+def fetch_stats(github_repo: str):
+ """Task 1: Fetch the statistics for a GitHub repo"""
+
+ return httpx.get(f"https://api.github.com/repos/{github_repo}").json()
+
+
+@task
+def get_stars(repo_stats: dict):
+ """Task 2: Get the number of stars from GitHub repo statistics"""
+
+ return repo_stats['stargazers_count']
+
+
+# Run the flow
+if __name__ == "__main__":
+ show_stars([
+ "PrefectHQ/prefect",
+ "pydantic/pydantic",
+ "huggingface/transformers"
+ ])
+```
+
+
+## Cache the results of a task
+
+For efficiency, you can skip tasks that have already run.
+For example, if you don't want to fetch the number of stars for a given repository more than once per day, you can cache those results for a day.
+
+```python
+from datetime import timedelta
+
+from prefect import task
+from prefect.cache_policies import INPUTS
+
+@task(cache_policy=INPUTS, cache_expiration=timedelta(days=1))
+def fetch_stats(github_repo: str):
+ """Task 1: Fetch the statistics for a GitHub repo"""
+ # ...
+```
+
+
+Run the following code to see caching in action:
+
+```python
+from datetime import timedelta
+import httpx
+
+from prefect import flow, task # Prefect flow and task decorators
+from prefect.cache_policies import INPUTS
+
+
+@flow(log_prints=True)
+def show_stars(github_repos: list[str]):
+ """Flow: Show the number of stars that GitHub repos have"""
+
+ for repo in github_repos:
+ # Call Task 1
+ repo_stats = fetch_stats(repo)
+
+ # Call Task 2
+ stars = get_stars(repo_stats)
+
+ # Print the result
+ print(f"{repo}: {stars} stars")
+
+
+@task(cache_policy=INPUTS, cache_expiration=timedelta(days=1))
+def fetch_stats(github_repo: str):
+ """Task 1: Fetch the statistics for a GitHub repo"""
+
+ return httpx.get(f"https://api.github.com/repos/{github_repo}").json()
+
+
+@task
+def get_stars(repo_stats: dict):
+ """Task 2: Get the number of stars from GitHub repo statistics"""
+
+ return repo_stats['stargazers_count']
+
+
+# Run the flow
+if __name__ == "__main__":
+ show_stars([
+ "PrefectHQ/prefect",
+ "pydantic/pydantic",
+ "huggingface/transformers"
+ ])
+```
+
+
+## Run your improved flow
+
+This is what your flow looks like after applying all of these improvements:
+
+```python my_data_pipeline.py
+from datetime import timedelta
+import httpx
+
+from prefect import flow, task
+from prefect.cache_policies import INPUTS
+from prefect.concurrency.sync import rate_limit
+
+
+@flow(log_prints=True)
+def show_stars(github_repos: list[str]):
+ """Flow: Show the number of stars that GitHub repos have"""
+
+ # Task 1: Make HTTP requests concurrently while respecting concurrency limits
+ repo_stats = []
+ for repo in github_repos:
+ rate_limit("github-api")
+ repo_stats.append({
+ 'repo': repo,
+ 'task': fetch_stats.submit(repo) # Submit each task to a task runner
+ })
+
+ # Task 2: Once each concurrent task completes, show the results
+ for repo in repo_stats:
+ repo_name = repo['repo']
+ stars = get_stars(repo['task'].result()) # Block until the task has completed
+ print(f"{repo_name}: {stars} stars")
+
+
+@task(retries=3, cache_policy=INPUTS, cache_expiration=timedelta(days=1))
+def fetch_stats(github_repo: str):
+ """Task 1: Fetch the statistics for a GitHub repo"""
+
+ api_response = httpx.get(f"https://api.github.com/repos/{github_repo}")
+ api_response.raise_for_status() # Force a retry if you don't get a 2xx status code
+ return api_response.json()
+
+
+@task
+def get_stars(repo_stats: dict):
+ """Task 2: Get the number of stars from GitHub repo statistics"""
+
+ return repo_stats['stargazers_count']
+
+
+# Run the flow
+if __name__ == "__main__":
+ show_stars([
+ "PrefectHQ/prefect",
+ "pydantic/pydantic",
+ "huggingface/transformers"
+ ])
+```
+
+Run your flow twice: first to run the tasks and cache the results, then again to retrieve the results from the cache.
+
+```bash
+# Run the tasks and cache the results
+python my_data_pipeline.py
+
+# Retrieve the cached results
+python my_data_pipeline.py
+```
+
+The terminal output from the second flow run should look like this:
+
+```bash
+09:08:12.265 | INFO | prefect.engine - Created flow run 'laughing-nightingale' for flow 'show-stars'
+09:08:12.266 | INFO | prefect.engine - View at http://127.0.0.1:4200/runs/flow-run/541864e8-12f7-4890-9397-b2ed361f6b20
+09:08:12.322 | INFO | Task run 'fetch_stats-0c9' - Finished in state Cached(type=COMPLETED)
+09:08:12.359 | INFO | Task run 'fetch_stats-e89' - Finished in state Cached(type=COMPLETED)
+09:08:12.360 | INFO | Task run 'get_stars-b51' - Finished in state Completed()
+09:08:12.361 | INFO | Flow run 'laughing-nightingale' - PrefectHQ/prefect: 17320 stars
+09:08:12.372 | INFO | Task run 'fetch_stats-8ef' - Finished in state Cached(type=COMPLETED)
+09:08:12.374 | INFO | Task run 'get_stars-08d' - Finished in state Completed()
+09:08:12.374 | INFO | Flow run 'laughing-nightingale' - pydantic/pydantic: 186319 stars
+09:08:12.387 | INFO | Task run 'get_stars-2af' - Finished in state Completed()
+09:08:12.387 | INFO | Flow run 'laughing-nightingale' - huggingface/transformers: 134849 stars
+09:08:12.404 | INFO | Flow run 'laughing-nightingale' - Finished in state Completed()
+```
+
+## Next steps
+
+In this tutorial, you built a resilient and performant data pipeline which uses the following techniques:
+
+- [Retries](/v3/develop/write-tasks#retries) to handle transient errors
+- [Concurrency](/v3/develop/task-runners) to speed up slow tasks
+- [Concurrency limits](/v3/develop/global-concurrency-limits) to avoid hitting the rate limits of your APIs
+- [Caching](/v3/develop/task-caching) to skip repeated tasks
+
+Next, learn how to [handle data dependencies and ingest large amounts of data](/v3/tutorials/scraping).
+You'll use error handling, pagination, and nested flows to scrape data from GitHub.
+
+
+Need help? [Book a meeting](https://calendly.com/prefect-experts/prefect-product-advocates?utm_campaign=prefect_docs_cloud&utm_content=prefect_docs&utm_medium=docs&utm_source=docs) with a Prefect Product Advocate to get your questions answered.
+
diff --git a/docs/v3/tutorials/schedule.mdx b/docs/v3/tutorials/schedule.mdx
new file mode 100644
index 000000000000..064212cc17ac
--- /dev/null
+++ b/docs/v3/tutorials/schedule.mdx
@@ -0,0 +1,134 @@
+---
+title: Schedule a flow
+description: Deploy flows and run them on a schedule with Prefect.
+---
+
+In the [Quickstart](/v3/get-started/quickstart), you learned how to convert a Python script to a Prefect flow.
+
+In this tutorial, you'll learn how to get that flow off of your local machine and run it on a schedule with Prefect.
+
+## Publish your code to a remote repository
+
+First, you need to take the code from your local machine and publish it to a remote repository.
+We've already published the code you need for this tutorial to GitHub:
+
+```
+https://github.com/prefecthq/demos.git
+```
+
+## Create a work pool
+
+Running a flow locally is a good start, but most use cases require a remote execution environment.
+A [work pool](/v3/deploy/infrastructure-concepts/work-pools/) is the most common interface for deploying flows to remote infrastructure.
+
+<Tabs>
+<Tab title="Self-hosted">
+
+Deploy your flow to a self-hosted Prefect server instance using a `Process` work pool.
+All flow runs submitted to this work pool will run in a local subprocess (the creation mechanics are similar for other work pool types that run on remote infrastructure).
+
+1. Create a `Process` work pool:
+
+ ```bash
+ prefect work-pool create --type process my-work-pool
+ ```
+
+1. Verify that the work pool exists:
+
+ ```bash
+ prefect work-pool ls
+ ```
+
+1. Start a worker to poll the work pool:
+
+ ```bash
+ prefect worker start --pool my-work-pool
+ ```
+
+</Tab>
+<Tab title="Prefect Cloud">
+
+
+Deploy your flow to Prefect Cloud using a managed work pool.
+
+1. Create a [managed work pool](/v3/deploy/infrastructure-concepts/work-pools):
+
+ ```bash
+ prefect work-pool create my-work-pool --type prefect:managed
+ ```
+
+1. View your new work pool on the **Work Pools** page of the UI.
+
+</Tab>
+</Tabs>
+
+You can also choose from other [work pool types](/v3/deploy/infrastructure-concepts/workers/#worker-types).
+
+
+## Deploy and schedule your flow
+
+A [deployment](/v3/deploy/infrastructure-examples/docker/) is used to determine when, where, and how a flow should run.
+Deployments elevate flows to remotely configurable entities that have their own API.
+To set a flow to run on a schedule, you need to create a deployment.
+
+1. Create a deployment in code:
+
+ ```python create_deployment.py
+ from prefect import flow
+
+ # Source for the code to deploy (here, a GitHub repo)
+ SOURCE_REPO="https://github.com/prefecthq/demos.git"
+
+ if __name__ == "__main__":
+ flow.from_source(
+ source=SOURCE_REPO,
+ entrypoint="my_gh_workflow.py:repo_info", # Specific flow to run
+ ).deploy(
+ name="my-first-deployment",
+ work_pool_name="my-work-pool", # Work pool target
+ cron="* * * * *", # Cron schedule (every minute)
+ )
+ ```
+
+
+ You can store your flow code in nearly any location as long as Prefect can access it.
+ See [Where to store your flow code](/v3/deploy/infrastructure-concepts/store-flow-code) for more details.
+
+
+1. Run the script to create the deployment:
+
+ ```bash
+ python create_deployment.py
+ ```
+
+ Check the logs to ensure your deployment was created:
+
+ ```bash
+ Successfully created/updated all deployments!
+ ______________________________________________________
+ | Deployments |
+ ______________________________________________________
+ | Name | Status | Details |
+ ______________________________________________________
+ | repo-info/my-first-deployment | applied | |
+ ______________________________________________________
+ ```
+
+1. Schedule a run for the deployment:
+
+ ```bash
+ prefect deployment run 'repo-info/my-first-deployment'
+ ```
+
+ Soon you should see the flow run graph and logs on the **Flow Run** page in the UI.
+ Logs are also streamed to the terminal.
+
+ ![Flow run graph and logs](/v3/img/ui/qs-flow-run.png)
+
+## Next steps
+
+In this tutorial, you successfully deployed your flow to remote infrastructure and scheduled it to run automatically.
+
+Next, learn how to build a [resilient and performant data pipeline](/v3/tutorials/pipelines) with retries, concurrent tasks, concurrency limits, and caching.
+
+
+Need help? [Book a meeting](https://calendly.com/prefect-experts/prefect-product-advocates?utm_campaign=prefect_docs_cloud&utm_content=prefect_docs&utm_medium=docs&utm_source=docs) with a Prefect Product Advocate to get your questions answered.
+
diff --git a/docs/v3/tutorials/scraping.mdx b/docs/v3/tutorials/scraping.mdx
new file mode 100644
index 000000000000..1a0c7761fd3c
--- /dev/null
+++ b/docs/v3/tutorials/scraping.mdx
@@ -0,0 +1,514 @@
+---
+title: Extract data from websites
+description: Use Prefect to fetch and analyze large amounts of data
+---
+
+In the [Build a data pipeline](/v3/tutorials/pipelines) tutorial, you learned how to create resilient and performant data pipelines.
+Now you'll learn how to handle data dependencies and ingest large amounts of data by building a GitHub issue analysis pipeline.
+
+The real world can present additional challenges when dealing with web data:
+
+- API requests can fail or give a response with missing or malformed data.
+- You need to make multiple dependent API calls.
+- You need to ingest data when you don't know in advance how much data is available.
+
+## Set up error handling
+
+Raise and catch exceptions to handle failures gracefully.
+For example, if you don't get a 2xx response from an API, raise an exception and log the error.
+
+```python
+from typing import List, Optional
+import httpx
+
+from prefect import task
+
+@task(log_prints=True)
+def fetch_page_of_issues(repo: str, page: int = 1) -> Optional[List[dict]]:
+ """Fetch a page of issues for a GitHub repository"""
+ try:
+ response = httpx.get(
+ f"https://api.github.com/repos/{repo}/issues",
+ params={"page": page, "state": "all", "per_page": 100}
+ )
+ response.raise_for_status() # Raise an exception if the response is not a 2xx status code
+ return response.json()
+ except Exception as e:
+ print(f"Error fetching issues for {repo}: {e}")
+ return None
+```
+
+
+Run the following code to see error handling in action:
+
+```python
+from typing import List, Optional
+import httpx
+
+from prefect import flow, task
+
+
+@flow(log_prints=True)
+def analyze_repo_health(repos: List[str]):
+ """Analyze issue health metrics for GitHub repositories"""
+ for repo in repos:
+ print(f"Analyzing {repo}...")
+
+ # Fetch and analyze all issues
+ fetch_page_of_issues(repo)
+
+
+@task(log_prints=True)
+def fetch_page_of_issues(repo: str, page: int = 1) -> Optional[List[dict]]:
+ """Fetch a page of issues for a GitHub repository"""
+ try:
+ response = httpx.get(
+ f"https://api.github.com/repos/{repo}/issues",
+ params={"page": page, "state": "all", "per_page": 100}
+ )
+ response.raise_for_status() # Raise an exception if the response is not a 2xx status code
+ return response.json()
+ except Exception as e:
+ print(f"Error fetching issues for {repo}: {e}")
+ return None
+
+
+if __name__ == "__main__":
+ analyze_repo_health([
+ "PrefectHQ/prefect",
+ "this-repo-does-not-exist/404" # This repo will trigger an error
+ ])
+```
+
+
+## Ingest large amounts of data
+
+Use pagination to fetch large amounts of data and run tasks concurrently to analyze the data efficiently:
+
+```python
+from typing import List
+
+from prefect import flow
+
+@flow(log_prints=True)
+def analyze_repo_health(repos: List[str]):
+ """Analyze issue health metrics for GitHub repositories"""
+ all_issues = []
+
+ for repo in repos:
+ for page in range(1, 3): # Get first 2 pages
+ issues = fetch_page_of_issues(repo, page)
+ if not issues:
+ break
+ all_issues.extend(issues)
+
+    # Run issue analysis tasks concurrently and collect their futures
+    issue_details = []
+    for issue in all_issues:
+        issue_details.append(analyze_issue.submit(issue))  # Submit each task to a task runner
+
+    # Wait for all analysis tasks to complete
+    for detail in issue_details:
+        result = detail.result()  # Block until the task has completed
+        print(f"Analyzed issue #{result['number']}")
+```
+
+
+Run the following code to see pagination and concurrent tasks in action:
+
+```python
+from typing import List, Optional
+import httpx
+
+from prefect import flow, task
+
+
+@flow(log_prints=True)
+def analyze_repo_health(repos: List[str]):
+ """Analyze issue health metrics for GitHub repositories"""
+ for repo in repos:
+ print(f"Analyzing {repo}...")
+
+ # Fetch and analyze all issues
+ fetch_repo_issues(repo)
+
+
+@flow
+def fetch_repo_issues(repo: str):
+ """Fetch all issues for a single repository"""
+    all_issues = []
+
+    for page in range(1, 3):  # Limit to 2 pages to avoid hitting rate limits
+        issues = fetch_page_of_issues(repo, page)
+        if not issues:
+            break
+        all_issues.extend(issues)
+
+ issue_details = []
+ for issue in all_issues[:5]: # Limit to 5 issues to avoid hitting rate limits
+ issue_details.append(
+ fetch_issue_details.submit(repo, issue['number']) # Submit each task to a task runner
+ )
+
+ details = []
+ for issue in issue_details:
+ details.append(issue.result())
+
+ return details
+
+
+@task(log_prints=True)
+def fetch_page_of_issues(repo: str, page: int = 1) -> Optional[List[dict]]:
+ """Fetch a page of issues for a GitHub repository"""
+ try:
+ response = httpx.get(
+ f"https://api.github.com/repos/{repo}/issues",
+ params={"page": page, "state": "all", "per_page": 100}
+ )
+ response.raise_for_status()
+ return response.json()
+ except Exception as e:
+ print(f"Error fetching issues for {repo}: {e}")
+ return None
+
+
+@task
+def fetch_issue_details(repo: str, issue_number: int) -> dict:
+ """Fetch detailed information about a specific issue"""
+ response = httpx.get(f"https://api.github.com/repos/{repo}/issues/{issue_number}")
+ issue_data = response.json()
+
+ return issue_data
+
+
+if __name__ == "__main__":
+ analyze_repo_health([
+ "PrefectHQ/prefect",
+ "pydantic/pydantic",
+ "huggingface/transformers"
+ ])
+```
+
+
+## Structure your code with dependent nested flows and tasks
+
+Use nested flows and tasks to help distribute tasks more efficiently and aid with debugging.
+
+* Use nested flows for more complex operations that involve multiple steps.
+* Use tasks for simpler, atomic operations.
+
+Here's an example of how to use nested flows and tasks:
+
+```python
+from typing import List
+
+from prefect import flow, task
+
+
+@flow
+def analyze_repo_health(repos: List[str]):
+ """Analyze issue health metrics for GitHub repositories"""
+ for repo in repos:
+
+ # Fetch and analyze all issues
+ issues = fetch_repo_issues(repo)
+
+ # Calculate metrics
+ resolution_rate = calculate_resolution_rate(issues)
+ # ...
+
+
+@flow
+def fetch_repo_issues(repo: str):
+ """Nested flow: Fetch all data for a single repository"""
+
+ # ...
+
+
+@task
+def calculate_resolution_rate(issues: List[dict]) -> float:
+ """Task: Calculate the percentage of closed issues"""
+
+ # ...
+```
+
+
+Run the following code to see metrics calculation in action:
+
+```python
+from typing import List, Optional
+import httpx
+
+from prefect import flow, task
+
+
+@flow(log_prints=True)
+def analyze_repo_health(repos: List[str]):
+ """Analyze issue health metrics for GitHub repositories"""
+ for repo in repos:
+ print(f"Analyzing {repo}...")
+
+ # Fetch and analyze all issues
+ issues = fetch_repo_issues(repo)
+
+ # Calculate metrics
+ resolution_rate = calculate_resolution_rate(issues)
+
+ print(f"Resolution rate: {resolution_rate:.1f}%")
+
+
+@flow
+def fetch_repo_issues(repo: str):
+ """Fetch all issues for a single repository"""
+    all_issues = []
+
+    for page in range(1, 3):  # Limit to 2 pages to avoid hitting rate limits
+        issues = fetch_page_of_issues(repo, page)
+        if not issues:
+            break
+        all_issues.extend(issues)
+
+ issue_details = []
+ for issue in all_issues[:5]: # Limit to 5 issues to avoid hitting rate limits
+ issue_details.append(
+ fetch_issue_details.submit(repo, issue['number'])
+ )
+
+ details = []
+ for issue in issue_details:
+ details.append(issue.result())
+
+ return details
+
+
+@task(log_prints=True)
+def fetch_page_of_issues(repo: str, page: int = 1) -> Optional[List[dict]]:
+ """Fetch a page of issues for a GitHub repository"""
+ try:
+ response = httpx.get(
+ f"https://api.github.com/repos/{repo}/issues",
+ params={"page": page, "state": "all", "per_page": 100}
+ )
+ response.raise_for_status()
+ return response.json()
+ except Exception as e:
+ print(f"Error fetching issues for {repo}: {e}")
+ return None
+
+
+@task
+def fetch_issue_details(repo: str, issue_number: int) -> dict:
+ """Fetch detailed information about a specific issue"""
+ response = httpx.get(f"https://api.github.com/repos/{repo}/issues/{issue_number}")
+ issue_data = response.json()
+
+ return issue_data
+
+
+@task
+def calculate_resolution_rate(issues: List[dict]) -> float:
+ """Calculate the percentage of closed issues"""
+ if not issues:
+ return 0
+ closed = sum(1 for issue in issues if issue['state'] == 'closed')
+ return (closed / len(issues)) * 100
+
+
+if __name__ == "__main__":
+ analyze_repo_health([
+ "PrefectHQ/prefect",
+ "pydantic/pydantic",
+ "huggingface/transformers"
+ ])
+```
+
+
+## Put it all together
+
+Here's the complete flow that combines all of these components.
+We'll also add retries, caching, and rate limiting to make the workflow more robust.
+
+```python repo_analysis.py
+from datetime import timedelta, datetime
+from statistics import mean
+from typing import List, Optional
+import httpx
+
+from prefect import flow, task
+from prefect.tasks import task_input_hash
+from prefect.concurrency.sync import rate_limit
+
+
+@flow(log_prints=True)
+def analyze_repo_health(repos: List[str]):
+ """Analyze issue health metrics for GitHub repositories"""
+ for repo in repos:
+ print(f"Analyzing {repo}...")
+
+ # Fetch and analyze all issues
+ issues = fetch_repo_issues(repo)
+
+ # Calculate metrics
+ avg_response_time = calculate_response_times(issues)
+ resolution_rate = calculate_resolution_rate(issues)
+
+ print(f"Average response time: {avg_response_time:.1f} hours")
+ print(f"Resolution rate: {resolution_rate:.1f}%")
+
+
+@flow
+def fetch_repo_issues(repo: str):
+ """Fetch all issues for a single repository"""
+    all_issues = []
+
+    for page in range(1, 3):  # Limit to 2 pages to avoid hitting rate limits
+        issues = fetch_page_of_issues(repo, page)
+        if not issues:
+            break
+        all_issues.extend(issues)
+
+ issue_details = []
+ for issue in all_issues[:5]: # Limit to 5 issues to avoid hitting rate limits
+ issue_details.append(
+ fetch_issue_details.submit(repo, issue['number'])
+ )
+
+ details = []
+ for issue in issue_details:
+ details.append(issue.result())
+
+ return details
+
+
+@task(log_prints=True, retries=3, cache_key_fn=task_input_hash, cache_expiration=timedelta(hours=1))
+def fetch_page_of_issues(repo: str, page: int = 1) -> Optional[List[dict]]:
+ """Fetch a page of issues for a GitHub repository"""
+ rate_limit("github-api")
+ try:
+ response = httpx.get(
+ f"https://api.github.com/repos/{repo}/issues",
+ params={"page": page, "state": "all", "per_page": 100}
+ )
+ response.raise_for_status()
+ return response.json()
+ except Exception as e:
+ print(f"Error fetching issues for {repo}: {e}")
+ return None
+
+
+@task(retries=3, cache_key_fn=task_input_hash, cache_expiration=timedelta(hours=1))
+def fetch_issue_details(repo: str, issue_number: int) -> dict:
+ """Fetch detailed information about a specific issue"""
+ rate_limit("github-api")
+ response = httpx.get(f"https://api.github.com/repos/{repo}/issues/{issue_number}")
+ issue_data = response.json()
+
+ # Fetch comments for the issue
+ comments = fetch_comments(issue_data['comments_url'])
+ issue_data['comments_data'] = comments
+
+ return issue_data
+
+
+@task(log_prints=True, retries=3, cache_key_fn=task_input_hash, cache_expiration=timedelta(hours=1))
+def fetch_comments(comments_url: str) -> List[dict]:
+ """Fetch comments for an issue"""
+ rate_limit("github-api")
+ try:
+ response = httpx.get(comments_url)
+ response.raise_for_status()
+ return response.json()
+ except Exception as e:
+ print(f"Error fetching comments: {e}")
+ return []
+
+
+@task
+def calculate_response_times(issues: List[dict]) -> float:
+ """Calculate average time to first response for issues"""
+ response_times = []
+
+ for issue in issues:
+ comments_data = issue.get('comments_data', [])
+ if comments_data: # If there are comments
+ created = datetime.fromisoformat(issue['created_at'].replace('Z', '+00:00'))
+ first_comment = datetime.fromisoformat(
+ comments_data[0]['created_at'].replace('Z', '+00:00')
+ )
+ response_time = (first_comment - created).total_seconds() / 3600
+ response_times.append(response_time)
+
+ return mean(response_times) if response_times else 0
+
+
+@task
+def calculate_resolution_rate(issues: List[dict]) -> float:
+ """Calculate the percentage of closed issues"""
+ if not issues:
+ return 0
+ closed = sum(1 for issue in issues if issue['state'] == 'closed')
+ return (closed / len(issues)) * 100
+
+
+if __name__ == "__main__":
+ analyze_repo_health([
+ "PrefectHQ/prefect",
+ "pydantic/pydantic",
+ "huggingface/transformers"
+ ])
+```
+
+Before running this code, make sure to set up the GitHub API rate limit:
+
+```bash
+# GitHub has a rate limit of 60 unauthenticated requests per hour (~0.016 requests per second)
+prefect gcl create github-api --limit 60 --slot-decay-per-second 0.016
+```
+
+Run your analysis:
+
+```bash
+python repo_analysis.py
+```
+
+The output should look something like this:
+
+```bash
+10:59:13.933 | INFO | prefect.engine - Created flow run 'robust-kangaroo' for flow 'analyze-repo-health'
+10:59:13.934 | INFO | prefect.engine - View at http://127.0.0.1:4200/runs/flow-run/abdf7f46-6d59-4857-99cd-9e265cadc4a7
+10:59:13.954 | INFO | Flow run 'robust-kangaroo' - Analyzing PrefectHQ/prefect...
+...
+10:59:27.631 | INFO | Flow run 'robust-kangaroo' - Average response time: 0.4 hours
+10:59:27.631 | INFO | Flow run 'robust-kangaroo' - Resolution rate: 40.0%
+10:59:27.632 | INFO | Flow run 'robust-kangaroo' - Analyzing pydantic/pydantic...
+...
+10:59:40.990 | INFO | Flow run 'robust-kangaroo' - Average response time: 0.0 hours
+10:59:40.991 | INFO | Flow run 'robust-kangaroo' - Resolution rate: 0.0%
+10:59:40.991 | INFO | Flow run 'robust-kangaroo' - Analyzing huggingface/transformers...
+...
+10:59:54.225 | INFO | Flow run 'robust-kangaroo' - Average response time: 1.1 hours
+10:59:54.225 | INFO | Flow run 'robust-kangaroo' - Resolution rate: 0.0%
+10:59:54.240 | INFO | Flow run 'robust-kangaroo' - Finished in state Completed()
+```
+
+## Next steps
+
+In this tutorial, you built a complex data extraction pipeline which uses the following new techniques:
+
+- Error recovery with try/except blocks
+- Modularized workflows with dependent [nested flows and tasks](/v3/develop/write-flows#nest-flows)
+- Efficient ingestion and processing of large data with pagination and [concurrent tasks](/v3/develop/task-runners)
+
+Now that you've finished this tutorial series, continue your learning journey by going deep on the following topics:
+
+- Write [flows](/v3/develop/write-flows) and [tasks](/v3/develop/write-tasks)
+- Manage [Prefect Cloud and server instances](/v3/manage)
+- Run workflows on [work pools](/v3/deploy/infrastructure-concepts/work-pools) using Kubernetes, Docker, and serverless infrastructure.
+
+
+Need help? [Book a meeting](https://calendly.com/prefect-experts/prefect-product-advocates?utm_campaign=prefect_docs_cloud&utm_content=prefect_docs&utm_medium=docs&utm_source=docs) with a Prefect Product Advocate to get your questions answered.
+
diff --git a/flows/worker.py b/flows/worker.py
index c11fdb7ea594..8a73f44029dd 100644
--- a/flows/worker.py
+++ b/flows/worker.py
@@ -1,9 +1,39 @@
+import asyncio
import subprocess
import sys
+from threading import Thread
+from typing import List
+
+from pydantic_extra_types.pendulum_dt import DateTime
+
+from prefect.events import Event
+from prefect.events.clients import get_events_subscriber
+from prefect.events.filters import EventFilter, EventNameFilter, EventOccurredFilter
+
+
+async def watch_worker_events(events: List[Event]):
+ """Watch for worker start/stop events and collect them"""
+ async with get_events_subscriber(
+ filter=EventFilter(
+ event=EventNameFilter(prefix=["prefect.worker."]),
+ occurred=EventOccurredFilter(since=DateTime.now()),
+ )
+ ) as events_subscriber:
+ async for event in events_subscriber:
+ events.append(event)
+
+
+def run_event_listener(events: List[Event]):
+ """Run the async event listener in a thread"""
+ asyncio.run(watch_worker_events(events))
-# Checks to make sure that collections are loaded prior to attempting to start a worker
def main():
+ events: List[Event] = []
+
+ listener_thread = Thread(target=run_event_listener, args=(events,), daemon=True)
+ listener_thread.start()
+
subprocess.check_call(
["python", "-m", "pip", "install", "prefect-kubernetes>=0.5.0"],
stdout=sys.stdout,
@@ -52,11 +82,28 @@ def main():
stderr=sys.stderr,
)
subprocess.check_call(
- ["prefect", "work-pool", "delete", "test-worker-pool"],
+ ["prefect", "--no-prompt", "work-pool", "delete", "test-worker-pool"],
stdout=sys.stdout,
stderr=sys.stderr,
)
+ worker_events = [e for e in events if e.event.startswith("prefect.worker.")]
+ assert (
+ len(worker_events) == 2
+ ), f"Expected 2 worker events, got {len(worker_events)}"
+
+ start_events = [e for e in worker_events if e.event == "prefect.worker.started"]
+ stop_events = [e for e in worker_events if e.event == "prefect.worker.stopped"]
+
+ assert len(start_events) == 1, "Expected 1 worker start event"
+ assert len(stop_events) == 1, "Expected 1 worker stop event"
+
+ print("Captured expected worker start and stop events!")
+
+ assert (
+ stop_events[0].follows == start_events[0].id
+ ), "Stop event should follow start event"
+
if __name__ == "__main__":
main()
diff --git a/old-sqlite.Dockerfile b/old-sqlite.Dockerfile
new file mode 100644
index 000000000000..23a85f34e977
--- /dev/null
+++ b/old-sqlite.Dockerfile
@@ -0,0 +1,59 @@
+# Build the Python distributable
+FROM python:3.9-slim AS python-builder
+
+WORKDIR /opt/prefect
+
+# Install git for version calculation
+RUN apt-get update && \
+ apt-get install --no-install-recommends -y \
+ git \
+ && apt-get clean && rm -rf /var/lib/apt/lists/*
+
+# Copy the repository for version calculation
+COPY . .
+
+# Create source distribution
+RUN python setup.py sdist && \
+ mv "dist/$(python setup.py --fullname).tar.gz" "dist/prefect.tar.gz"
+
+# Final image
+FROM python:3.9-slim
+
+# Accept SQLite version as build argument
+ARG SQLITE_VERSION="3310100"
+ARG SQLITE_YEAR="2020"
+
+# Install build dependencies
+RUN apt-get update && apt-get install -y \
+ build-essential \
+ wget
+
+# Download and compile SQLite
+RUN wget https://www.sqlite.org/${SQLITE_YEAR}/sqlite-autoconf-${SQLITE_VERSION}.tar.gz \
+ && tar xvfz sqlite-autoconf-${SQLITE_VERSION}.tar.gz \
+ && cd sqlite-autoconf-${SQLITE_VERSION} \
+ && ./configure \
+ && make \
+ && make install \
+ && ldconfig \
+ && cd .. \
+ && rm -rf sqlite-autoconf-${SQLITE_VERSION}*
+
+# Install uv for faster pip operations
+COPY --from=ghcr.io/astral-sh/uv:latest /uv /bin/uv
+ENV UV_SYSTEM_PYTHON=1
+
+# Set library path to use our compiled SQLite
+ENV LD_LIBRARY_PATH=/usr/local/lib
+
+WORKDIR /app
+
+# Copy the built distributable
+COPY --from=python-builder /opt/prefect/dist/prefect.tar.gz ./dist/
+
+# Install requirements and Prefect
+COPY requirements*.txt ./
+RUN uv pip install -r requirements.txt
+RUN uv pip install ./dist/prefect.tar.gz
+
+
diff --git a/requirements-client.txt b/requirements-client.txt
index 0e0b0ca443a5..2d412a4bc90d 100644
--- a/requirements-client.txt
+++ b/requirements-client.txt
@@ -3,7 +3,7 @@ asgi-lifespan >= 1.0, < 3.0
cachetools >= 5.3, < 6.0
cloudpickle >= 2.0, < 4.0
coolname >= 1.0.4, < 3.0.0
-croniter >= 1.0.12, < 4.0.0
+croniter >= 1.0.12, < 6.0.0
exceptiongroup >= 1.0.0
fastapi >= 0.111.0, < 1.0.0
fsspec >= 2022.5.0
@@ -14,6 +14,7 @@ httpx[http2] >= 0.23, != 0.23.2
importlib_metadata >= 4.4; python_version < '3.10'
jsonpatch >= 1.32, < 2.0
jsonschema >= 4.0.0, < 5.0.0
+opentelemetry-api >= 1.27.0, < 2.0.0
orjson >= 3.7, < 4.0
packaging >= 21.3, < 24.3
pathspec >= 0.8.0
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 87a442d36d7e..3a1439fc2da8 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -12,7 +12,6 @@ pluggy >= 1.4.0
pytest >= 8.3
pytest-asyncio >= 0.24
pytest-benchmark
-pytest-codspeed
pytest-cov
pytest-env
pytest-flakefinder
@@ -37,3 +36,12 @@ mkdocs
mkdocs-material
mkdocstrings[python]
mkdocs-gen-files
+
+# OpenTelemetry
+# Other than the `test-utils` package these versions should match the versions
+# in `requirements-otel.txt`
+opentelemetry-distro >= 0.48b0, < 1.0.0
+opentelemetry-exporter-otlp >= 1.27.0, < 2.0.0
+opentelemetry-instrumentation >= 0.48b0, < 1.0.0
+opentelemetry-instrumentation-logging >= 0.48b0, < 1.0.0
+opentelemetry-test-utils >= 0.48b0, < 1.0.0
diff --git a/requirements-otel.txt b/requirements-otel.txt
new file mode 100644
index 000000000000..96146b3dae66
--- /dev/null
+++ b/requirements-otel.txt
@@ -0,0 +1,7 @@
+# When updating this file, please also bump the versions in
+# `requirements-dev.txt`
+
+opentelemetry-distro >= 0.48b0, < 1.0.0
+opentelemetry-exporter-otlp >= 1.27.0, < 2.0.0
+opentelemetry-instrumentation >= 0.48b0, < 1.0.0
+opentelemetry-instrumentation-logging >= 0.48b0, < 1.0.0
diff --git a/requirements.txt b/requirements.txt
index fbdf059cdeb5..f4cb6f0574f5 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -15,4 +15,4 @@ humanize >= 4.9.0, < 5.0.0
pytz >= 2021.1, < 2025
readchar >= 4.0.0, < 5.0.0
sqlalchemy[asyncio] >= 2.0, < 3.0.0
-typer >= 0.12.0, != 0.12.2, < 0.13.0
+typer >= 0.12.0, != 0.12.2, < 0.14.0
diff --git a/schemas/settings.schema.json b/schemas/settings.schema.json
new file mode 100644
index 000000000000..46ab12535ef7
--- /dev/null
+++ b/schemas/settings.schema.json
@@ -0,0 +1,2251 @@
+{
+ "$defs": {
+ "APISettings": {
+ "description": "Settings for interacting with the Prefect API",
+ "properties": {
+ "url": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The URL of the Prefect API. If not set, the client will attempt to infer it.",
+ "supported_environment_variables": [
+ "PREFECT_API_URL"
+ ],
+ "title": "Url"
+ },
+ "key": {
+ "anyOf": [
+ {
+ "format": "password",
+ "type": "string",
+ "writeOnly": true
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The API key used for authentication with the Prefect API. Should be kept secret.",
+ "supported_environment_variables": [
+ "PREFECT_API_KEY"
+ ],
+ "title": "Key"
+ },
+ "tls_insecure_skip_verify": {
+ "default": false,
+ "description": "If `True`, disables SSL checking to allow insecure requests. Setting to False is recommended only during development. For example, when using self-signed certificates.",
+ "supported_environment_variables": [
+ "PREFECT_API_TLS_INSECURE_SKIP_VERIFY"
+ ],
+ "title": "Tls Insecure Skip Verify",
+ "type": "boolean"
+ },
+ "ssl_cert_file": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "This configuration settings option specifies the path to an SSL certificate file.",
+ "supported_environment_variables": [
+ "PREFECT_API_SSL_CERT_FILE"
+ ],
+ "title": "Ssl Cert File"
+ },
+ "enable_http2": {
+ "default": false,
+ "description": "If true, enable support for HTTP/2 for communicating with an API. If the API does not support HTTP/2, this will have no effect and connections will be made via HTTP/1.1.",
+ "supported_environment_variables": [
+ "PREFECT_API_ENABLE_HTTP2"
+ ],
+ "title": "Enable Http2",
+ "type": "boolean"
+ },
+ "request_timeout": {
+ "default": 60.0,
+ "description": "The default timeout for requests to the API",
+ "supported_environment_variables": [
+ "PREFECT_API_REQUEST_TIMEOUT"
+ ],
+ "title": "Request Timeout",
+ "type": "number"
+ }
+ },
+ "title": "APISettings",
+ "type": "object"
+ },
+ "CLISettings": {
+ "description": "Settings for controlling CLI behavior",
+ "properties": {
+ "colors": {
+ "default": true,
+ "description": "If True, use colors in CLI output. If `False`, output will not include colors codes.",
+ "supported_environment_variables": [
+ "PREFECT_CLI_COLORS"
+ ],
+ "title": "Colors",
+ "type": "boolean"
+ },
+ "prompt": {
+ "anyOf": [
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "If `True`, use interactive prompts in CLI commands. If `False`, no interactive prompts will be used. If `None`, the value will be dynamically determined based on the presence of an interactive-enabled terminal.",
+ "supported_environment_variables": [
+ "PREFECT_CLI_PROMPT"
+ ],
+ "title": "Prompt"
+ },
+ "wrap_lines": {
+ "default": true,
+ "description": "If `True`, wrap text by inserting new lines in long lines in CLI output. If `False`, output will not be wrapped.",
+ "supported_environment_variables": [
+ "PREFECT_CLI_WRAP_LINES"
+ ],
+ "title": "Wrap Lines",
+ "type": "boolean"
+ }
+ },
+ "title": "CLISettings",
+ "type": "object"
+ },
+ "ClientMetricsSettings": {
+ "description": "Settings for controlling metrics reporting from the client",
+ "properties": {
+ "enabled": {
+ "default": false,
+ "description": "Whether or not to enable Prometheus metrics in the client.",
+ "supported_environment_variables": [
+ "PREFECT_CLIENT_METRICS_ENABLED",
+ "PREFECT_CLIENT_ENABLE_METRICS"
+ ],
+ "title": "Enabled",
+ "type": "boolean"
+ },
+ "port": {
+ "default": 4201,
+ "description": "The port to expose the client Prometheus metrics on.",
+ "supported_environment_variables": [
+ "PREFECT_CLIENT_METRICS_PORT"
+ ],
+ "title": "Port",
+ "type": "integer"
+ }
+ },
+ "title": "ClientMetricsSettings",
+ "type": "object"
+ },
+ "ClientSettings": {
+ "description": "Settings for controlling API client behavior",
+ "properties": {
+ "max_retries": {
+ "default": 5,
+ "description": "\n The maximum number of retries to perform on failed HTTP requests.\n Defaults to 5. Set to 0 to disable retries.\n See `PREFECT_CLIENT_RETRY_EXTRA_CODES` for details on which HTTP status codes are\n retried.\n ",
+ "minimum": 0,
+ "supported_environment_variables": [
+ "PREFECT_CLIENT_MAX_RETRIES"
+ ],
+ "title": "Max Retries",
+ "type": "integer"
+ },
+ "retry_jitter_factor": {
+ "default": 0.2,
+ "description": "\n A value greater than or equal to zero to control the amount of jitter added to retried\n client requests. Higher values introduce larger amounts of jitter.\n Set to 0 to disable jitter. See `clamped_poisson_interval` for details on the how jitter\n can affect retry lengths.\n ",
+ "minimum": 0.0,
+ "supported_environment_variables": [
+ "PREFECT_CLIENT_RETRY_JITTER_FACTOR"
+ ],
+ "title": "Retry Jitter Factor",
+ "type": "number"
+ },
+ "retry_extra_codes": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "maximum": 599,
+ "minimum": 100,
+ "type": "integer"
+ },
+ {
+ "items": {
+ "maximum": 599,
+ "minimum": 100,
+ "type": "integer"
+ },
+ "type": "array",
+ "uniqueItems": true
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "description": "\n A list of extra HTTP status codes to retry on. Defaults to an empty list.\n 429, 502 and 503 are always retried. Please note that not all routes are idempotent and retrying\n may result in unexpected behavior.\n ",
+ "examples": [
+ "404,429,503",
+ "429",
+ [
+ 404,
+ 429,
+ 503
+ ]
+ ],
+ "supported_environment_variables": [
+ "PREFECT_CLIENT_RETRY_EXTRA_CODES"
+ ],
+ "title": "Retry Extra Codes"
+ },
+ "csrf_support_enabled": {
+ "default": true,
+ "description": "\n Determines if CSRF token handling is active in the Prefect client for API\n requests.\n\n When enabled (`True`), the client automatically manages CSRF tokens by\n retrieving, storing, and including them in applicable state-changing requests\n ",
+ "supported_environment_variables": [
+ "PREFECT_CLIENT_CSRF_SUPPORT_ENABLED"
+ ],
+ "title": "Csrf Support Enabled",
+ "type": "boolean"
+ },
+ "metrics": {
+ "$ref": "#/$defs/ClientMetricsSettings",
+ "supported_environment_variables": []
+ }
+ },
+ "title": "ClientSettings",
+ "type": "object"
+ },
+ "CloudSettings": {
+ "description": "Settings for interacting with Prefect Cloud",
+ "properties": {
+ "api_url": {
+ "default": "https://api.prefect.cloud/api",
+ "description": "API URL for Prefect Cloud. Used for authentication with Prefect Cloud.",
+ "supported_environment_variables": [
+ "PREFECT_CLOUD_API_URL"
+ ],
+ "title": "Api Url",
+ "type": "string"
+ },
+ "ui_url": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The URL of the Prefect Cloud UI. If not set, the client will attempt to infer it.",
+ "supported_environment_variables": [
+ "PREFECT_CLOUD_UI_URL"
+ ],
+ "title": "Ui Url"
+ }
+ },
+ "title": "CloudSettings",
+ "type": "object"
+ },
+ "DeploymentsSettings": {
+ "description": "Settings for configuring deployments defaults",
+ "properties": {
+ "default_work_pool_name": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The default work pool to use when creating deployments.",
+ "supported_environment_variables": [
+ "PREFECT_DEPLOYMENTS_DEFAULT_WORK_POOL_NAME",
+ "PREFECT_DEFAULT_WORK_POOL_NAME"
+ ],
+ "title": "Default Work Pool Name"
+ },
+ "default_docker_build_namespace": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The default Docker namespace to use when building images.",
+ "examples": [
+ "my-dockerhub-registry",
+ "4999999999999.dkr.ecr.us-east-2.amazonaws.com/my-ecr-repo"
+ ],
+ "supported_environment_variables": [
+ "PREFECT_DEPLOYMENTS_DEFAULT_DOCKER_BUILD_NAMESPACE",
+ "PREFECT_DEFAULT_DOCKER_BUILD_NAMESPACE"
+ ],
+ "title": "Default Docker Build Namespace"
+ }
+ },
+ "title": "DeploymentsSettings",
+ "type": "object"
+ },
+ "ExperimentsSettings": {
+ "description": "Settings for configuring experimental features",
+ "properties": {
+ "warn": {
+ "default": true,
+ "description": "If `True`, warn on usage of experimental features.",
+ "supported_environment_variables": [
+ "PREFECT_EXPERIMENTS_WARN",
+ "PREFECT_EXPERIMENTAL_WARN"
+ ],
+ "title": "Warn",
+ "type": "boolean"
+ },
+ "telemetry_enabled": {
+ "default": false,
+ "description": "Enables sending telemetry to Prefect Cloud.",
+ "supported_environment_variables": [
+ "PREFECT_EXPERIMENTS_TELEMETRY_ENABLED"
+ ],
+ "title": "Telemetry Enabled",
+ "type": "boolean"
+ }
+ },
+ "title": "ExperimentsSettings",
+ "type": "object"
+ },
+ "FlowsSettings": {
+ "description": "Settings for controlling flow behavior",
+ "properties": {
+ "default_retries": {
+ "default": 0,
+ "description": "This value sets the default number of retries for all flows.",
+ "minimum": 0,
+ "supported_environment_variables": [
+ "PREFECT_FLOWS_DEFAULT_RETRIES",
+ "PREFECT_FLOW_DEFAULT_RETRIES"
+ ],
+ "title": "Default Retries",
+ "type": "integer"
+ },
+ "default_retry_delay_seconds": {
+ "anyOf": [
+ {
+ "type": "integer"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "items": {
+ "type": "number"
+ },
+ "type": "array"
+ }
+ ],
+ "default": 0,
+ "description": "This value sets the default retry delay seconds for all flows.",
+ "supported_environment_variables": [
+ "PREFECT_FLOWS_DEFAULT_RETRY_DELAY_SECONDS",
+ "PREFECT_FLOW_DEFAULT_RETRY_DELAY_SECONDS"
+ ],
+ "title": "Default Retry Delay Seconds"
+ }
+ },
+ "title": "FlowsSettings",
+ "type": "object"
+ },
+ "InternalSettings": {
+ "properties": {
+ "logging_level": {
+ "default": "ERROR",
+ "description": "The default logging level for Prefect's internal machinery loggers.",
+ "enum": [
+ "DEBUG",
+ "INFO",
+ "WARNING",
+ "ERROR",
+ "CRITICAL"
+ ],
+ "supported_environment_variables": [
+ "PREFECT_INTERNAL_LOGGING_LEVEL",
+ "PREFECT_LOGGING_INTERNAL_LEVEL"
+ ],
+ "title": "Logging Level",
+ "type": "string"
+ }
+ },
+ "title": "InternalSettings",
+ "type": "object"
+ },
+ "LoggingSettings": {
+ "description": "Settings for controlling logging behavior",
+ "properties": {
+ "level": {
+ "default": "INFO",
+ "description": "The default logging level for Prefect loggers.",
+ "enum": [
+ "DEBUG",
+ "INFO",
+ "WARNING",
+ "ERROR",
+ "CRITICAL"
+ ],
+ "supported_environment_variables": [
+ "PREFECT_LOGGING_LEVEL"
+ ],
+ "title": "Level",
+ "type": "string"
+ },
+ "config_path": {
+ "anyOf": [
+ {
+ "format": "path",
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The path to a custom YAML logging configuration file.",
+ "supported_environment_variables": [
+ "PREFECT_LOGGING_CONFIG_PATH",
+ "PREFECT_LOGGING_SETTINGS_PATH"
+ ],
+ "title": "Config Path"
+ },
+ "extra_loggers": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "Additional loggers to attach to Prefect logging at runtime.",
+ "supported_environment_variables": [
+ "PREFECT_LOGGING_EXTRA_LOGGERS"
+ ],
+ "title": "Extra Loggers"
+ },
+ "log_prints": {
+ "default": false,
+ "description": "If `True`, `print` statements in flows and tasks will be redirected to the Prefect logger for the given run.",
+ "supported_environment_variables": [
+ "PREFECT_LOGGING_LOG_PRINTS"
+ ],
+ "title": "Log Prints",
+ "type": "boolean"
+ },
+ "colors": {
+ "default": true,
+        "description": "If `True`, use colors in CLI output. If `False`, output will not include color codes.",
+ "supported_environment_variables": [
+ "PREFECT_LOGGING_COLORS"
+ ],
+ "title": "Colors",
+ "type": "boolean"
+ },
+ "markup": {
+ "default": false,
+        "description": "\n        Whether to interpret strings wrapped in square brackets as a style.\n        This allows styles to be conveniently added to log messages, e.g.\n        `[red]This is a red message.[/red]`. However, the downside is, if enabled,\n        strings that contain square brackets may be inaccurately interpreted and\n        lead to incomplete output, e.g.\n        `[red]This is a red message.[/red]` may be interpreted as\n        `This is a red message.`, with the bracketed text consumed as styling.\n        ",
+ "supported_environment_variables": [
+ "PREFECT_LOGGING_MARKUP"
+ ],
+ "title": "Markup",
+ "type": "boolean"
+ },
+ "to_api": {
+ "$ref": "#/$defs/LoggingToAPISettings",
+ "supported_environment_variables": []
+ }
+ },
+ "title": "LoggingSettings",
+ "type": "object"
+ },
+ "LoggingToAPISettings": {
+ "description": "Settings for controlling logging to the API",
+ "properties": {
+ "enabled": {
+ "default": true,
+ "description": "If `True`, logs will be sent to the API.",
+ "supported_environment_variables": [
+ "PREFECT_LOGGING_TO_API_ENABLED"
+ ],
+ "title": "Enabled",
+ "type": "boolean"
+ },
+ "batch_interval": {
+ "default": 2.0,
+ "description": "The number of seconds between batched writes of logs to the API.",
+ "supported_environment_variables": [
+ "PREFECT_LOGGING_TO_API_BATCH_INTERVAL"
+ ],
+ "title": "Batch Interval",
+ "type": "number"
+ },
+ "batch_size": {
+ "default": 4000000,
+        "description": "The maximum size in bytes for a batch of logs sent to the API.",
+ "supported_environment_variables": [
+ "PREFECT_LOGGING_TO_API_BATCH_SIZE"
+ ],
+ "title": "Batch Size",
+ "type": "integer"
+ },
+ "max_log_size": {
+ "default": 1000000,
+ "description": "The maximum size in bytes for a single log.",
+ "supported_environment_variables": [
+ "PREFECT_LOGGING_TO_API_MAX_LOG_SIZE"
+ ],
+ "title": "Max Log Size",
+ "type": "integer"
+ },
+ "when_missing_flow": {
+ "default": "warn",
+ "description": "\n Controls the behavior when loggers attempt to send logs to the API handler from outside of a flow.\n \n All logs sent to the API must be associated with a flow run. The API log handler can\n only be used outside of a flow by manually providing a flow run identifier. Logs\n that are not associated with a flow run will not be sent to the API. This setting can\n be used to determine if a warning or error is displayed when the identifier is missing.\n\n The following options are available:\n\n - \"warn\": Log a warning message.\n - \"error\": Raise an error.\n - \"ignore\": Do not log a warning message or raise an error.\n ",
+ "enum": [
+ "warn",
+ "error",
+ "ignore"
+ ],
+ "supported_environment_variables": [
+ "PREFECT_LOGGING_TO_API_WHEN_MISSING_FLOW"
+ ],
+ "title": "When Missing Flow",
+ "type": "string"
+ }
+ },
+ "title": "LoggingToAPISettings",
+ "type": "object"
+ },
+ "ResultsSettings": {
+ "description": "Settings for controlling result storage behavior",
+ "properties": {
+ "default_serializer": {
+ "default": "pickle",
+ "description": "The default serializer to use when not otherwise specified.",
+ "supported_environment_variables": [
+ "PREFECT_RESULTS_DEFAULT_SERIALIZER"
+ ],
+ "title": "Default Serializer",
+ "type": "string"
+ },
+ "persist_by_default": {
+ "default": false,
+ "description": "The default setting for persisting results when not otherwise specified.",
+ "supported_environment_variables": [
+ "PREFECT_RESULTS_PERSIST_BY_DEFAULT"
+ ],
+ "title": "Persist By Default",
+ "type": "boolean"
+ },
+ "default_storage_block": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The `block-type/block-document` slug of a block to use as the default result storage.",
+ "supported_environment_variables": [
+ "PREFECT_RESULTS_DEFAULT_STORAGE_BLOCK",
+ "PREFECT_DEFAULT_RESULT_STORAGE_BLOCK"
+ ],
+ "title": "Default Storage Block"
+ },
+ "local_storage_path": {
+ "anyOf": [
+ {
+ "format": "path",
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The path to a directory to store results in.",
+ "supported_environment_variables": [
+ "PREFECT_RESULTS_LOCAL_STORAGE_PATH",
+ "PREFECT_LOCAL_STORAGE_PATH"
+ ],
+ "title": "Local Storage Path"
+ }
+ },
+ "title": "ResultsSettings",
+ "type": "object"
+ },
+ "RunnerServerSettings": {
+ "description": "Settings for controlling runner server behavior",
+ "properties": {
+ "enable": {
+ "default": false,
+ "description": "Whether or not to enable the runner's webserver.",
+ "supported_environment_variables": [
+ "PREFECT_RUNNER_SERVER_ENABLE"
+ ],
+ "title": "Enable",
+ "type": "boolean"
+ },
+ "host": {
+ "default": "localhost",
+ "description": "The host address the runner's webserver should bind to.",
+ "supported_environment_variables": [
+ "PREFECT_RUNNER_SERVER_HOST"
+ ],
+ "title": "Host",
+ "type": "string"
+ },
+ "port": {
+ "default": 8080,
+ "description": "The port the runner's webserver should bind to.",
+ "supported_environment_variables": [
+ "PREFECT_RUNNER_SERVER_PORT"
+ ],
+ "title": "Port",
+ "type": "integer"
+ },
+ "log_level": {
+ "default": "error",
+ "description": "The log level of the runner's webserver.",
+ "enum": [
+ "DEBUG",
+ "INFO",
+ "WARNING",
+ "ERROR",
+ "CRITICAL"
+ ],
+ "supported_environment_variables": [
+ "PREFECT_RUNNER_SERVER_LOG_LEVEL"
+ ],
+ "title": "Log Level",
+ "type": "string"
+ },
+ "missed_polls_tolerance": {
+ "default": 2,
+ "description": "Number of missed polls before a runner is considered unhealthy by its webserver.",
+ "supported_environment_variables": [
+ "PREFECT_RUNNER_SERVER_MISSED_POLLS_TOLERANCE"
+ ],
+ "title": "Missed Polls Tolerance",
+ "type": "integer"
+ }
+ },
+ "title": "RunnerServerSettings",
+ "type": "object"
+ },
+ "RunnerSettings": {
+ "description": "Settings for controlling runner behavior",
+ "properties": {
+ "process_limit": {
+ "default": 5,
+ "description": "Maximum number of processes a runner will execute in parallel.",
+ "supported_environment_variables": [
+ "PREFECT_RUNNER_PROCESS_LIMIT"
+ ],
+ "title": "Process Limit",
+ "type": "integer"
+ },
+ "poll_frequency": {
+ "default": 10,
+ "description": "Number of seconds a runner should wait between queries for scheduled work.",
+ "supported_environment_variables": [
+ "PREFECT_RUNNER_POLL_FREQUENCY"
+ ],
+ "title": "Poll Frequency",
+ "type": "integer"
+ },
+ "server": {
+ "$ref": "#/$defs/RunnerServerSettings",
+ "supported_environment_variables": []
+ }
+ },
+ "title": "RunnerSettings",
+ "type": "object"
+ },
+ "ServerAPISettings": {
+ "description": "Settings for controlling API server behavior",
+ "properties": {
+ "host": {
+ "default": "127.0.0.1",
+ "description": "The API's host address (defaults to `127.0.0.1`).",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_API_HOST"
+ ],
+ "title": "Host",
+ "type": "string"
+ },
+ "port": {
+ "default": 4200,
+        "description": "The API's port (defaults to `4200`).",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_API_PORT"
+ ],
+ "title": "Port",
+ "type": "integer"
+ },
+ "default_limit": {
+ "default": 200,
+ "description": "The default limit applied to queries that can return multiple objects, such as `POST /flow_runs/filter`.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_API_DEFAULT_LIMIT",
+ "PREFECT_API_DEFAULT_LIMIT"
+ ],
+ "title": "Default Limit",
+ "type": "integer"
+ },
+ "keepalive_timeout": {
+ "default": 5,
+        "description": "\n        The API's keep-alive timeout (defaults to `5`).\n        Refer to https://www.uvicorn.org/settings/#timeouts for details.\n\n        When the API is hosted behind a load balancer, you may want to set this to a value\n        greater than the load balancer's idle timeout.\n\n        Note this setting only applies when calling `prefect server start`; if hosting the\n        API with another tool you will need to configure this there instead.\n        ",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_API_KEEPALIVE_TIMEOUT"
+ ],
+ "title": "Keepalive Timeout",
+ "type": "integer"
+ },
+ "csrf_protection_enabled": {
+ "default": false,
+ "description": "\n Controls the activation of CSRF protection for the Prefect server API.\n\n When enabled (`True`), the server enforces CSRF validation checks on incoming\n state-changing requests (POST, PUT, PATCH, DELETE), requiring a valid CSRF\n token to be included in the request headers or body. This adds a layer of\n security by preventing unauthorized or malicious sites from making requests on\n behalf of authenticated users.\n\n It is recommended to enable this setting in production environments where the\n API is exposed to web clients to safeguard against CSRF attacks.\n\n Note: Enabling this setting requires corresponding support in the client for\n CSRF token management. See PREFECT_CLIENT_CSRF_SUPPORT_ENABLED for more.\n ",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_API_CSRF_PROTECTION_ENABLED",
+ "PREFECT_SERVER_CSRF_PROTECTION_ENABLED"
+ ],
+ "title": "Csrf Protection Enabled",
+ "type": "boolean"
+ },
+ "csrf_token_expiration": {
+ "default": "PT1H",
+ "description": "\n Specifies the duration for which a CSRF token remains valid after being issued\n by the server.\n\n The default expiration time is set to 1 hour, which offers a reasonable\n compromise. Adjust this setting based on your specific security requirements\n and usage patterns.\n ",
+ "format": "duration",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_API_CSRF_TOKEN_EXPIRATION",
+ "PREFECT_SERVER_CSRF_TOKEN_EXPIRATION"
+ ],
+ "title": "Csrf Token Expiration",
+ "type": "string"
+ },
+ "cors_allowed_origins": {
+ "default": "*",
+ "description": "\n A comma-separated list of origins that are authorized to make cross-origin requests to the API.\n\n By default, this is set to `*`, which allows requests from all origins.\n ",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_API_CORS_ALLOWED_ORIGINS",
+ "PREFECT_SERVER_CORS_ALLOWED_ORIGINS"
+ ],
+ "title": "Cors Allowed Origins",
+ "type": "string"
+ },
+ "cors_allowed_methods": {
+ "default": "*",
+        "description": "\n        A comma-separated list of methods that are authorized to make cross-origin requests to the API.\n\n        By default, this is set to `*`, which allows all methods.\n        ",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_API_CORS_ALLOWED_METHODS",
+ "PREFECT_SERVER_CORS_ALLOWED_METHODS"
+ ],
+ "title": "Cors Allowed Methods",
+ "type": "string"
+ },
+ "cors_allowed_headers": {
+ "default": "*",
+        "description": "\n        A comma-separated list of headers that are authorized to make cross-origin requests to the API.\n\n        By default, this is set to `*`, which allows all headers.\n        ",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_API_CORS_ALLOWED_HEADERS",
+ "PREFECT_SERVER_CORS_ALLOWED_HEADERS"
+ ],
+ "title": "Cors Allowed Headers",
+ "type": "string"
+ }
+ },
+ "title": "ServerAPISettings",
+ "type": "object"
+ },
+ "ServerDatabaseSettings": {
+ "description": "Settings for controlling server database behavior",
+ "properties": {
+ "connection_url": {
+ "anyOf": [
+ {
+ "format": "password",
+ "type": "string",
+ "writeOnly": true
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+        "description": "\n        A database connection URL in a SQLAlchemy-compatible\n        format. Prefect currently supports SQLite and Postgres. Note that all\n        Prefect database engines must use an async driver - for SQLite, use\n        `sqlite+aiosqlite` and for Postgres use `postgresql+asyncpg`.\n\n        SQLite in-memory databases can be used by providing the url\n        `sqlite+aiosqlite:///file::memory:?cache=shared&uri=true&check_same_thread=false`,\n        which will allow the database to be accessed by multiple threads. Note\n        that in-memory databases cannot be accessed from multiple processes and\n        should only be used for simple tests.\n        ",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_DATABASE_CONNECTION_URL",
+ "PREFECT_API_DATABASE_CONNECTION_URL"
+ ],
+ "title": "Connection Url"
+ },
+ "driver": {
+ "anyOf": [
+ {
+ "enum": [
+ "postgresql+asyncpg",
+ "sqlite+aiosqlite"
+ ],
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The database driver to use when connecting to the database. If not set, the driver will be inferred from the connection URL.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_DATABASE_DRIVER",
+ "PREFECT_API_DATABASE_DRIVER"
+ ],
+ "title": "Driver"
+ },
+ "host": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The database server host.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_DATABASE_HOST",
+ "PREFECT_API_DATABASE_HOST"
+ ],
+ "title": "Host"
+ },
+ "port": {
+ "anyOf": [
+ {
+ "type": "integer"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The database server port.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_DATABASE_PORT",
+ "PREFECT_API_DATABASE_PORT"
+ ],
+ "title": "Port"
+ },
+ "user": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The user to use when connecting to the database.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_DATABASE_USER",
+ "PREFECT_API_DATABASE_USER"
+ ],
+ "title": "User"
+ },
+ "name": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The name of the Prefect database on the remote server, or the path to the database file for SQLite.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_DATABASE_NAME",
+ "PREFECT_API_DATABASE_NAME"
+ ],
+ "title": "Name"
+ },
+ "password": {
+ "anyOf": [
+ {
+ "format": "password",
+ "type": "string",
+ "writeOnly": true
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The password to use when connecting to the database. Should be kept secret.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_DATABASE_PASSWORD",
+ "PREFECT_API_DATABASE_PASSWORD"
+ ],
+ "title": "Password"
+ },
+ "echo": {
+ "default": false,
+ "description": "If `True`, SQLAlchemy will log all SQL issued to the database. Defaults to `False`.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_DATABASE_ECHO",
+ "PREFECT_API_DATABASE_ECHO"
+ ],
+ "title": "Echo",
+ "type": "boolean"
+ },
+ "migrate_on_start": {
+ "default": true,
+ "description": "If `True`, the database will be migrated on application startup.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_DATABASE_MIGRATE_ON_START",
+ "PREFECT_API_DATABASE_MIGRATE_ON_START"
+ ],
+ "title": "Migrate On Start",
+ "type": "boolean"
+ },
+ "timeout": {
+ "anyOf": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": 10.0,
+ "description": "A statement timeout, in seconds, applied to all database interactions made by the API. Defaults to 10 seconds.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_DATABASE_TIMEOUT",
+ "PREFECT_API_DATABASE_TIMEOUT"
+ ],
+ "title": "Timeout"
+ },
+ "connection_timeout": {
+ "anyOf": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": 5,
+ "description": "A connection timeout, in seconds, applied to database connections. Defaults to `5`.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_DATABASE_CONNECTION_TIMEOUT",
+ "PREFECT_API_DATABASE_CONNECTION_TIMEOUT"
+ ],
+ "title": "Connection Timeout"
+ },
+ "sqlalchemy_pool_size": {
+ "anyOf": [
+ {
+ "type": "integer"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "Controls connection pool size when using a PostgreSQL database with the Prefect API. If not set, the default SQLAlchemy pool size will be used.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_DATABASE_SQLALCHEMY_POOL_SIZE",
+ "PREFECT_SQLALCHEMY_POOL_SIZE"
+ ],
+ "title": "Sqlalchemy Pool Size"
+ },
+ "sqlalchemy_max_overflow": {
+ "anyOf": [
+ {
+ "type": "integer"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "Controls maximum overflow of the connection pool when using a PostgreSQL database with the Prefect API. If not set, the default SQLAlchemy maximum overflow value will be used.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_DATABASE_SQLALCHEMY_MAX_OVERFLOW",
+ "PREFECT_SQLALCHEMY_MAX_OVERFLOW"
+ ],
+ "title": "Sqlalchemy Max Overflow"
+ }
+ },
+ "title": "ServerDatabaseSettings",
+ "type": "object"
+ },
+ "ServerDeploymentsSettings": {
+ "properties": {
+ "concurrency_slot_wait_seconds": {
+ "default": 30.0,
+ "description": "The number of seconds to wait before retrying when a deployment flow run cannot secure a concurrency slot from the server.",
+ "minimum": 0.0,
+ "supported_environment_variables": [
+ "PREFECT_SERVER_DEPLOYMENTS_CONCURRENCY_SLOT_WAIT_SECONDS",
+ "PREFECT_DEPLOYMENT_CONCURRENCY_SLOT_WAIT_SECONDS"
+ ],
+ "title": "Concurrency Slot Wait Seconds",
+ "type": "number"
+ }
+ },
+ "title": "ServerDeploymentsSettings",
+ "type": "object"
+ },
+ "ServerEphemeralSettings": {
+ "description": "Settings for controlling ephemeral server behavior",
+ "properties": {
+ "enabled": {
+ "default": false,
+ "description": "\n Controls whether or not a subprocess server can be started when no API URL is provided.\n ",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_EPHEMERAL_ENABLED",
+ "PREFECT_SERVER_ALLOW_EPHEMERAL_MODE"
+ ],
+ "title": "Enabled",
+ "type": "boolean"
+ },
+ "startup_timeout_seconds": {
+ "default": 20,
+        "description": "\n        The number of seconds to wait for the server to start when ephemeral mode is enabled.\n        Defaults to `20`.\n        ",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_EPHEMERAL_STARTUP_TIMEOUT_SECONDS"
+ ],
+ "title": "Startup Timeout Seconds",
+ "type": "integer"
+ }
+ },
+ "title": "ServerEphemeralSettings",
+ "type": "object"
+ },
+ "ServerEventsSettings": {
+ "description": "Settings for controlling behavior of the events subsystem",
+ "properties": {
+ "stream_out_enabled": {
+ "default": true,
+ "description": "Whether or not to stream events out to the API via websockets.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_EVENTS_STREAM_OUT_ENABLED",
+ "PREFECT_API_EVENTS_STREAM_OUT_ENABLED"
+ ],
+ "title": "Stream Out Enabled",
+ "type": "boolean"
+ },
+ "related_resource_cache_ttl": {
+ "default": "PT5M",
+ "description": "The number of seconds to cache related resources for in the API.",
+ "format": "duration",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_EVENTS_RELATED_RESOURCE_CACHE_TTL",
+ "PREFECT_API_EVENTS_RELATED_RESOURCE_CACHE_TTL"
+ ],
+ "title": "Related Resource Cache Ttl",
+ "type": "string"
+ },
+ "maximum_labels_per_resource": {
+ "default": 500,
+ "description": "The maximum number of labels a resource may have.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_EVENTS_MAXIMUM_LABELS_PER_RESOURCE",
+ "PREFECT_EVENTS_MAXIMUM_LABELS_PER_RESOURCE"
+ ],
+ "title": "Maximum Labels Per Resource",
+ "type": "integer"
+ },
+ "maximum_related_resources": {
+ "default": 500,
+ "description": "The maximum number of related resources an Event may have.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_EVENTS_MAXIMUM_RELATED_RESOURCES",
+ "PREFECT_EVENTS_MAXIMUM_RELATED_RESOURCES"
+ ],
+ "title": "Maximum Related Resources",
+ "type": "integer"
+ },
+ "maximum_size_bytes": {
+ "default": 1500000,
+        "description": "The maximum size of an Event when serialized to JSON.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_EVENTS_MAXIMUM_SIZE_BYTES",
+ "PREFECT_EVENTS_MAXIMUM_SIZE_BYTES"
+ ],
+ "title": "Maximum Size Bytes",
+ "type": "integer"
+ },
+ "expired_bucket_buffer": {
+ "default": "PT1M",
+        "description": "The amount of time to retain expired automation buckets.",
+ "format": "duration",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_EVENTS_EXPIRED_BUCKET_BUFFER",
+ "PREFECT_EVENTS_EXPIRED_BUCKET_BUFFER"
+ ],
+ "title": "Expired Bucket Buffer",
+ "type": "string"
+ },
+ "proactive_granularity": {
+ "default": "PT5S",
+        "description": "How frequently proactive automations are evaluated.",
+ "format": "duration",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_EVENTS_PROACTIVE_GRANULARITY",
+ "PREFECT_EVENTS_PROACTIVE_GRANULARITY"
+ ],
+ "title": "Proactive Granularity",
+ "type": "string"
+ },
+ "retention_period": {
+ "default": "P7D",
+ "description": "The amount of time to retain events in the database.",
+ "format": "duration",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_EVENTS_RETENTION_PERIOD",
+ "PREFECT_EVENTS_RETENTION_PERIOD"
+ ],
+ "title": "Retention Period",
+ "type": "string"
+ },
+ "maximum_websocket_backfill": {
+ "default": "PT15M",
+ "description": "The maximum range to look back for backfilling events for a websocket subscriber.",
+ "format": "duration",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_EVENTS_MAXIMUM_WEBSOCKET_BACKFILL",
+ "PREFECT_EVENTS_MAXIMUM_WEBSOCKET_BACKFILL"
+ ],
+ "title": "Maximum Websocket Backfill",
+ "type": "string"
+ },
+ "websocket_backfill_page_size": {
+ "default": 250,
+ "description": "The page size for the queries to backfill events for websocket subscribers.",
+ "exclusiveMinimum": 0,
+ "supported_environment_variables": [
+ "PREFECT_SERVER_EVENTS_WEBSOCKET_BACKFILL_PAGE_SIZE",
+ "PREFECT_EVENTS_WEBSOCKET_BACKFILL_PAGE_SIZE"
+ ],
+ "title": "Websocket Backfill Page Size",
+ "type": "integer"
+ },
+ "messaging_broker": {
+ "default": "prefect.server.utilities.messaging.memory",
+        "description": "Which message broker implementation to use for the messaging system. Should point to a module that exports a Publisher and Consumer class.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_EVENTS_MESSAGING_BROKER",
+ "PREFECT_MESSAGING_BROKER"
+ ],
+ "title": "Messaging Broker",
+ "type": "string"
+ },
+ "messaging_cache": {
+ "default": "prefect.server.utilities.messaging.memory",
+ "description": "Which cache implementation to use for the events system. Should point to a module that exports a Cache class.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_EVENTS_MESSAGING_CACHE",
+ "PREFECT_MESSAGING_CACHE"
+ ],
+ "title": "Messaging Cache",
+ "type": "string"
+ }
+ },
+ "title": "ServerEventsSettings",
+ "type": "object"
+ },
+ "ServerFlowRunGraphSettings": {
+ "description": "Settings for controlling behavior of the flow run graph",
+ "properties": {
+ "max_nodes": {
+ "default": 10000,
+        "description": "The maximum size of a flow run graph on the v2 API.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_FLOW_RUN_GRAPH_MAX_NODES",
+ "PREFECT_API_MAX_FLOW_RUN_GRAPH_NODES"
+ ],
+ "title": "Max Nodes",
+ "type": "integer"
+ },
+ "max_artifacts": {
+ "default": 10000,
+        "description": "The maximum number of artifacts to show on a flow run graph on the v2 API.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_FLOW_RUN_GRAPH_MAX_ARTIFACTS",
+ "PREFECT_API_MAX_FLOW_RUN_GRAPH_ARTIFACTS"
+ ],
+ "title": "Max Artifacts",
+ "type": "integer"
+ }
+ },
+ "title": "ServerFlowRunGraphSettings",
+ "type": "object"
+ },
+ "ServerServicesCancellationCleanupSettings": {
+ "description": "Settings for controlling the cancellation cleanup service",
+ "properties": {
+ "enabled": {
+ "default": true,
+ "description": "Whether or not to start the cancellation cleanup service in the server application.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_SERVICES_CANCELLATION_CLEANUP_ENABLED",
+ "PREFECT_API_SERVICES_CANCELLATION_CLEANUP_ENABLED"
+ ],
+ "title": "Enabled",
+ "type": "boolean"
+ },
+ "loop_seconds": {
+ "default": 20,
+ "description": "The cancellation cleanup service will look for non-terminal tasks and subflows this often. Defaults to `20`.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_SERVICES_CANCELLATION_CLEANUP_LOOP_SECONDS",
+ "PREFECT_API_SERVICES_CANCELLATION_CLEANUP_LOOP_SECONDS"
+ ],
+ "title": "Loop Seconds",
+ "type": "number"
+ }
+ },
+ "title": "ServerServicesCancellationCleanupSettings",
+ "type": "object"
+ },
+ "ServerServicesEventPersisterSettings": {
+ "description": "Settings for controlling the event persister service",
+ "properties": {
+ "enabled": {
+ "default": true,
+ "description": "Whether or not to start the event persister service in the server application.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_SERVICES_EVENT_PERSISTER_ENABLED",
+ "PREFECT_API_SERVICES_EVENT_PERSISTER_ENABLED"
+ ],
+ "title": "Enabled",
+ "type": "boolean"
+ },
+ "batch_size": {
+ "default": 20,
+ "description": "The number of events the event persister will attempt to insert in one batch.",
+ "exclusiveMinimum": 0,
+ "supported_environment_variables": [
+ "PREFECT_SERVER_SERVICES_EVENT_PERSISTER_BATCH_SIZE",
+ "PREFECT_API_SERVICES_EVENT_PERSISTER_BATCH_SIZE"
+ ],
+ "title": "Batch Size",
+ "type": "integer"
+ },
+ "flush_interval": {
+ "default": 5,
+ "description": "The maximum number of seconds between flushes of the event persister.",
+ "exclusiveMinimum": 0.0,
+ "supported_environment_variables": [
+ "PREFECT_SERVER_SERVICES_EVENT_PERSISTER_FLUSH_INTERVAL",
+ "PREFECT_API_SERVICES_EVENT_PERSISTER_FLUSH_INTERVAL"
+ ],
+ "title": "Flush Interval",
+ "type": "number"
+ }
+ },
+ "title": "ServerServicesEventPersisterSettings",
+ "type": "object"
+ },
+ "ServerServicesFlowRunNotificationsSettings": {
+ "description": "Settings for controlling the flow run notifications service",
+ "properties": {
+ "enabled": {
+ "default": true,
+ "description": "Whether or not to start the flow run notifications service in the server application.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_SERVICES_FLOW_RUN_NOTIFICATIONS_ENABLED",
+ "PREFECT_API_SERVICES_FLOW_RUN_NOTIFICATIONS_ENABLED"
+ ],
+ "title": "Enabled",
+ "type": "boolean"
+ }
+ },
+ "title": "ServerServicesFlowRunNotificationsSettings",
+ "type": "object"
+ },
+ "ServerServicesForemanSettings": {
+ "description": "Settings for controlling the foreman service",
+ "properties": {
+ "enabled": {
+ "default": true,
+ "description": "Whether or not to start the foreman service in the server application.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_SERVICES_FOREMAN_ENABLED",
+ "PREFECT_API_SERVICES_FOREMAN_ENABLED"
+ ],
+ "title": "Enabled",
+ "type": "boolean"
+ },
+ "loop_seconds": {
+ "default": 15,
+ "description": "The foreman service will check for offline workers this often. Defaults to `15`.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_SERVICES_FOREMAN_LOOP_SECONDS",
+ "PREFECT_API_SERVICES_FOREMAN_LOOP_SECONDS"
+ ],
+ "title": "Loop Seconds",
+ "type": "number"
+ },
+ "inactivity_heartbeat_multiple": {
+ "default": 3,
+ "description": "\n The number of heartbeats that must be missed before a worker is marked as offline. Defaults to `3`.\n ",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_SERVICES_FOREMAN_INACTIVITY_HEARTBEAT_MULTIPLE",
+ "PREFECT_API_SERVICES_FOREMAN_INACTIVITY_HEARTBEAT_MULTIPLE"
+ ],
+ "title": "Inactivity Heartbeat Multiple",
+ "type": "integer"
+ },
+ "fallback_heartbeat_interval_seconds": {
+ "default": 30,
+ "description": "\n The number of seconds to use for online/offline evaluation if a worker's heartbeat\n interval is not set. Defaults to `30`.\n ",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_SERVICES_FOREMAN_FALLBACK_HEARTBEAT_INTERVAL_SECONDS",
+ "PREFECT_API_SERVICES_FOREMAN_FALLBACK_HEARTBEAT_INTERVAL_SECONDS"
+ ],
+ "title": "Fallback Heartbeat Interval Seconds",
+ "type": "integer"
+ },
+ "deployment_last_polled_timeout_seconds": {
+ "default": 60,
+ "description": "\n The number of seconds before a deployment is marked as not ready if it has not been\n polled. Defaults to `60`.\n ",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_SERVICES_FOREMAN_DEPLOYMENT_LAST_POLLED_TIMEOUT_SECONDS",
+ "PREFECT_API_SERVICES_FOREMAN_DEPLOYMENT_LAST_POLLED_TIMEOUT_SECONDS"
+ ],
+ "title": "Deployment Last Polled Timeout Seconds",
+ "type": "integer"
+ },
+ "work_queue_last_polled_timeout_seconds": {
+ "default": 60,
+ "description": "\n The number of seconds before a work queue is marked as not ready if it has not been\n polled. Defaults to `60`.\n ",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_SERVICES_FOREMAN_WORK_QUEUE_LAST_POLLED_TIMEOUT_SECONDS",
+ "PREFECT_API_SERVICES_FOREMAN_WORK_QUEUE_LAST_POLLED_TIMEOUT_SECONDS"
+ ],
+ "title": "Work Queue Last Polled Timeout Seconds",
+ "type": "integer"
+ }
+ },
+ "title": "ServerServicesForemanSettings",
+ "type": "object"
+ },
+ "ServerServicesLateRunsSettings": {
+ "description": "Settings for controlling the late runs service",
+ "properties": {
+ "enabled": {
+ "default": true,
+ "description": "Whether or not to start the late runs service in the server application.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_SERVICES_LATE_RUNS_ENABLED",
+ "PREFECT_API_SERVICES_LATE_RUNS_ENABLED"
+ ],
+ "title": "Enabled",
+ "type": "boolean"
+ },
+ "loop_seconds": {
+ "default": 5,
+ "description": "\n The late runs service will look for runs to mark as late this often. Defaults to `5`.\n ",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_SERVICES_LATE_RUNS_LOOP_SECONDS",
+ "PREFECT_API_SERVICES_LATE_RUNS_LOOP_SECONDS"
+ ],
+ "title": "Loop Seconds",
+ "type": "number"
+ },
+ "after_seconds": {
+ "default": "PT15S",
+        "description": "\n        The late runs service will mark runs as late after they have exceeded their scheduled start time by this many seconds. Defaults to `15` seconds.\n        ",
+ "format": "duration",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_SERVICES_LATE_RUNS_AFTER_SECONDS",
+ "PREFECT_API_SERVICES_LATE_RUNS_AFTER_SECONDS"
+ ],
+ "title": "After Seconds",
+ "type": "string"
+ }
+ },
+ "title": "ServerServicesLateRunsSettings",
+ "type": "object"
+ },
+ "ServerServicesPauseExpirationsSettings": {
+ "description": "Settings for controlling the pause expiration service",
+ "properties": {
+ "enabled": {
+ "default": true,
+ "description": "\n Whether or not to start the paused flow run expiration service in the server\n application. If disabled, paused flows that have timed out will remain in a Paused state\n until a resume attempt.\n ",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_SERVICES_PAUSE_EXPIRATIONS_ENABLED",
+ "PREFECT_API_SERVICES_PAUSE_EXPIRATIONS_ENABLED"
+ ],
+ "title": "Enabled",
+ "type": "boolean"
+ },
+ "loop_seconds": {
+ "default": 5,
+ "description": "\n The pause expiration service will look for runs to mark as failed this often. Defaults to `5`.\n ",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_SERVICES_PAUSE_EXPIRATIONS_LOOP_SECONDS",
+ "PREFECT_API_SERVICES_PAUSE_EXPIRATIONS_LOOP_SECONDS"
+ ],
+ "title": "Loop Seconds",
+ "type": "number"
+ }
+ },
+ "title": "ServerServicesPauseExpirationsSettings",
+ "type": "object"
+ },
+ "ServerServicesSchedulerSettings": {
+ "description": "Settings for controlling the scheduler service",
+ "properties": {
+ "enabled": {
+ "default": true,
+ "description": "Whether or not to start the scheduler service in the server application.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_SERVICES_SCHEDULER_ENABLED",
+ "PREFECT_API_SERVICES_SCHEDULER_ENABLED"
+ ],
+ "title": "Enabled",
+ "type": "boolean"
+ },
+ "loop_seconds": {
+ "default": 60,
+ "description": "\n The scheduler loop interval, in seconds. This determines\n how often the scheduler will attempt to schedule new flow runs, but has no\n impact on how quickly either flow runs or task runs are actually executed.\n Defaults to `60`.\n ",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_SERVICES_SCHEDULER_LOOP_SECONDS",
+ "PREFECT_API_SERVICES_SCHEDULER_LOOP_SECONDS"
+ ],
+ "title": "Loop Seconds",
+ "type": "number"
+ },
+ "deployment_batch_size": {
+ "default": 100,
+ "description": "\n The number of deployments the scheduler will attempt to\n schedule in a single batch. If there are more deployments than the batch\n size, the scheduler immediately attempts to schedule the next batch; it\n does not sleep for `scheduler_loop_seconds` until it has visited every\n deployment once. Defaults to `100`.\n ",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_SERVICES_SCHEDULER_DEPLOYMENT_BATCH_SIZE",
+ "PREFECT_API_SERVICES_SCHEDULER_DEPLOYMENT_BATCH_SIZE"
+ ],
+ "title": "Deployment Batch Size",
+ "type": "integer"
+ },
+ "max_runs": {
+ "default": 100,
+        "description": "\n        The scheduler will attempt to schedule up to this many\n        auto-scheduled runs in the future. Note that deployments may have fewer than\n        this many scheduled runs, depending on the value of\n        `scheduler_max_scheduled_time`. Defaults to `100`.\n        ",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_SERVICES_SCHEDULER_MAX_RUNS",
+ "PREFECT_API_SERVICES_SCHEDULER_MAX_RUNS"
+ ],
+ "title": "Max Runs",
+ "type": "integer"
+ },
+ "min_runs": {
+ "default": 3,
+        "description": "\n        The scheduler will attempt to schedule at least this many\n        auto-scheduled runs in the future. Note that deployments may have more than\n        this many scheduled runs, depending on the value of\n        `scheduler_min_scheduled_time`. Defaults to `3`.\n        ",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_SERVICES_SCHEDULER_MIN_RUNS",
+ "PREFECT_API_SERVICES_SCHEDULER_MIN_RUNS"
+ ],
+ "title": "Min Runs",
+ "type": "integer"
+ },
+ "max_scheduled_time": {
+ "default": "P100D",
+ "description": "\n The scheduler will create new runs up to this far in the\n future. Note that this setting will take precedence over\n `scheduler_max_runs`: if a flow runs once a month and\n `scheduler_max_scheduled_time` is three months, then only three runs will be\n scheduled. Defaults to 100 days (`8640000` seconds).\n ",
+ "format": "duration",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_SERVICES_SCHEDULER_MAX_SCHEDULED_TIME",
+ "PREFECT_API_SERVICES_SCHEDULER_MAX_SCHEDULED_TIME"
+ ],
+ "title": "Max Scheduled Time",
+ "type": "string"
+ },
+ "min_scheduled_time": {
+ "default": "PT1H",
+        "description": "\n        The scheduler will create new runs at least this far in the\n        future. Note that this setting will take precedence over `scheduler_min_runs`:\n        if a flow runs every hour and `scheduler_min_scheduled_time` is three hours,\n        then three runs will be scheduled even if `scheduler_min_runs` is 1. Defaults to\n        1 hour (`PT1H`).\n        ",
+ "format": "duration",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_SERVICES_SCHEDULER_MIN_SCHEDULED_TIME",
+ "PREFECT_API_SERVICES_SCHEDULER_MIN_SCHEDULED_TIME"
+ ],
+ "title": "Min Scheduled Time",
+ "type": "string"
+ },
+ "insert_batch_size": {
+ "default": 500,
+ "description": "\n The number of runs the scheduler will attempt to insert in a single batch.\n Defaults to `500`.\n ",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_SERVICES_SCHEDULER_INSERT_BATCH_SIZE",
+ "PREFECT_API_SERVICES_SCHEDULER_INSERT_BATCH_SIZE"
+ ],
+ "title": "Insert Batch Size",
+ "type": "integer"
+ }
+ },
+ "title": "ServerServicesSchedulerSettings",
+ "type": "object"
+ },
+ "ServerServicesSettings": {
+ "description": "Settings for controlling server services",
+ "properties": {
+ "cancellation_cleanup": {
+ "$ref": "#/$defs/ServerServicesCancellationCleanupSettings",
+ "supported_environment_variables": []
+ },
+ "event_persister": {
+ "$ref": "#/$defs/ServerServicesEventPersisterSettings",
+ "supported_environment_variables": []
+ },
+ "flow_run_notifications": {
+ "$ref": "#/$defs/ServerServicesFlowRunNotificationsSettings",
+ "supported_environment_variables": []
+ },
+ "foreman": {
+ "$ref": "#/$defs/ServerServicesForemanSettings",
+ "supported_environment_variables": []
+ },
+ "late_runs": {
+ "$ref": "#/$defs/ServerServicesLateRunsSettings",
+ "supported_environment_variables": []
+ },
+ "scheduler": {
+ "$ref": "#/$defs/ServerServicesSchedulerSettings",
+ "supported_environment_variables": []
+ },
+ "pause_expirations": {
+ "$ref": "#/$defs/ServerServicesPauseExpirationsSettings",
+ "supported_environment_variables": []
+ },
+ "task_run_recorder": {
+ "$ref": "#/$defs/ServerServicesTaskRunRecorderSettings",
+ "supported_environment_variables": []
+ },
+ "triggers": {
+ "$ref": "#/$defs/ServerServicesTriggersSettings",
+ "supported_environment_variables": []
+ }
+ },
+ "title": "ServerServicesSettings",
+ "type": "object"
+ },
+ "ServerServicesTaskRunRecorderSettings": {
+ "description": "Settings for controlling the task run recorder service",
+ "properties": {
+ "enabled": {
+ "default": true,
+ "description": "Whether or not to start the task run recorder service in the server application.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_SERVICES_TASK_RUN_RECORDER_ENABLED",
+ "PREFECT_API_SERVICES_TASK_RUN_RECORDER_ENABLED"
+ ],
+ "title": "Enabled",
+ "type": "boolean"
+ }
+ },
+ "title": "ServerServicesTaskRunRecorderSettings",
+ "type": "object"
+ },
+ "ServerServicesTriggersSettings": {
+ "description": "Settings for controlling the triggers service",
+ "properties": {
+ "enabled": {
+ "default": true,
+ "description": "Whether or not to start the triggers service in the server application.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_SERVICES_TRIGGERS_ENABLED",
+ "PREFECT_API_SERVICES_TRIGGERS_ENABLED"
+ ],
+ "title": "Enabled",
+ "type": "boolean"
+ }
+ },
+ "title": "ServerServicesTriggersSettings",
+ "type": "object"
+ },
+ "ServerSettings": {
+ "description": "Settings for controlling server behavior",
+ "properties": {
+ "logging_level": {
+ "default": "WARNING",
+ "description": "The default logging level for the Prefect API server.",
+ "enum": [
+ "DEBUG",
+ "INFO",
+ "WARNING",
+ "ERROR",
+ "CRITICAL"
+ ],
+ "supported_environment_variables": [
+ "PREFECT_SERVER_LOGGING_LEVEL",
+ "PREFECT_LOGGING_SERVER_LEVEL"
+ ],
+ "title": "Logging Level",
+ "type": "string"
+ },
+ "analytics_enabled": {
+ "default": true,
+ "description": "\n When enabled, Prefect sends anonymous data (e.g. count of flow runs, package version)\n on server startup to help us improve our product.\n ",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_ANALYTICS_ENABLED"
+ ],
+ "title": "Analytics Enabled",
+ "type": "boolean"
+ },
+ "metrics_enabled": {
+ "default": false,
+ "description": "Whether or not to enable Prometheus metrics in the API.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_METRICS_ENABLED",
+ "PREFECT_API_ENABLE_METRICS"
+ ],
+ "title": "Metrics Enabled",
+ "type": "boolean"
+ },
+ "log_retryable_errors": {
+ "default": false,
+        "description": "If `True`, log retryable errors in the API and its services.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_LOG_RETRYABLE_ERRORS",
+ "PREFECT_API_LOG_RETRYABLE_ERRORS"
+ ],
+ "title": "Log Retryable Errors",
+ "type": "boolean"
+ },
+ "register_blocks_on_start": {
+ "default": true,
+ "description": "If set, any block types that have been imported will be registered with the backend on application startup. If not set, block types must be manually registered.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_REGISTER_BLOCKS_ON_START",
+ "PREFECT_API_BLOCKS_REGISTER_ON_START"
+ ],
+ "title": "Register Blocks On Start",
+ "type": "boolean"
+ },
+ "memoize_block_auto_registration": {
+ "default": true,
+        "description": "Controls whether or not block auto-registration on start is memoized.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_MEMOIZE_BLOCK_AUTO_REGISTRATION",
+ "PREFECT_MEMOIZE_BLOCK_AUTO_REGISTRATION"
+ ],
+ "title": "Memoize Block Auto Registration",
+ "type": "boolean"
+ },
+ "memo_store_path": {
+ "anyOf": [
+ {
+ "format": "path",
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The path to the memo store file.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_MEMO_STORE_PATH",
+ "PREFECT_MEMO_STORE_PATH"
+ ],
+ "title": "Memo Store Path"
+ },
+ "deployment_schedule_max_scheduled_runs": {
+ "default": 50,
+ "description": "The maximum number of scheduled runs to create for a deployment.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_DEPLOYMENT_SCHEDULE_MAX_SCHEDULED_RUNS",
+ "PREFECT_DEPLOYMENT_SCHEDULE_MAX_SCHEDULED_RUNS"
+ ],
+ "title": "Deployment Schedule Max Scheduled Runs",
+ "type": "integer"
+ },
+ "api": {
+ "$ref": "#/$defs/ServerAPISettings",
+ "supported_environment_variables": []
+ },
+ "database": {
+ "$ref": "#/$defs/ServerDatabaseSettings",
+ "supported_environment_variables": []
+ },
+ "deployments": {
+ "$ref": "#/$defs/ServerDeploymentsSettings",
+ "description": "Settings for controlling server deployments behavior",
+ "supported_environment_variables": []
+ },
+ "ephemeral": {
+ "$ref": "#/$defs/ServerEphemeralSettings",
+ "supported_environment_variables": []
+ },
+ "events": {
+ "$ref": "#/$defs/ServerEventsSettings",
+ "description": "Settings for controlling server events behavior",
+ "supported_environment_variables": []
+ },
+ "flow_run_graph": {
+ "$ref": "#/$defs/ServerFlowRunGraphSettings",
+ "description": "Settings for controlling flow run graph behavior",
+ "supported_environment_variables": []
+ },
+ "services": {
+ "$ref": "#/$defs/ServerServicesSettings",
+ "description": "Settings for controlling server services behavior",
+ "supported_environment_variables": []
+ },
+ "tasks": {
+ "$ref": "#/$defs/ServerTasksSettings",
+ "description": "Settings for controlling server tasks behavior",
+ "supported_environment_variables": []
+ },
+ "ui": {
+ "$ref": "#/$defs/ServerUISettings",
+ "description": "Settings for controlling server UI behavior",
+ "supported_environment_variables": []
+ }
+ },
+ "title": "ServerSettings",
+ "type": "object"
+ },
+ "ServerTasksSchedulingSettings": {
+ "description": "Settings for controlling server-side behavior related to task scheduling",
+ "properties": {
+ "max_scheduled_queue_size": {
+ "default": 1000,
+ "description": "The maximum number of scheduled tasks to queue for submission.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_TASKS_SCHEDULING_MAX_SCHEDULED_QUEUE_SIZE",
+ "PREFECT_TASK_SCHEDULING_MAX_SCHEDULED_QUEUE_SIZE"
+ ],
+ "title": "Max Scheduled Queue Size",
+ "type": "integer"
+ },
+ "max_retry_queue_size": {
+ "default": 100,
+ "description": "The maximum number of retries to queue for submission.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_TASKS_SCHEDULING_MAX_RETRY_QUEUE_SIZE",
+ "PREFECT_TASK_SCHEDULING_MAX_RETRY_QUEUE_SIZE"
+ ],
+ "title": "Max Retry Queue Size",
+ "type": "integer"
+ },
+ "pending_task_timeout": {
+ "default": "PT0S",
+        "description": "How long before a PENDING task is made available to another task worker.",
+ "format": "duration",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_TASKS_SCHEDULING_PENDING_TASK_TIMEOUT",
+ "PREFECT_TASK_SCHEDULING_PENDING_TASK_TIMEOUT"
+ ],
+ "title": "Pending Task Timeout",
+ "type": "string"
+ }
+ },
+ "title": "ServerTasksSchedulingSettings",
+ "type": "object"
+ },
+ "ServerTasksSettings": {
+ "description": "Settings for controlling server-side behavior related to tasks",
+ "properties": {
+ "tag_concurrency_slot_wait_seconds": {
+ "default": 30,
+ "description": "The number of seconds to wait before retrying when a task run cannot secure a concurrency slot from the server.",
+ "minimum": 0.0,
+ "supported_environment_variables": [
+ "PREFECT_SERVER_TASKS_TAG_CONCURRENCY_SLOT_WAIT_SECONDS",
+ "PREFECT_TASK_RUN_TAG_CONCURRENCY_SLOT_WAIT_SECONDS"
+ ],
+ "title": "Tag Concurrency Slot Wait Seconds",
+ "type": "number"
+ },
+ "max_cache_key_length": {
+ "default": 2000,
+ "description": "The maximum number of characters allowed for a task run cache key.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_TASKS_MAX_CACHE_KEY_LENGTH",
+ "PREFECT_API_TASK_CACHE_KEY_MAX_LENGTH"
+ ],
+ "title": "Max Cache Key Length",
+ "type": "integer"
+ },
+ "scheduling": {
+ "$ref": "#/$defs/ServerTasksSchedulingSettings",
+ "supported_environment_variables": []
+ }
+ },
+ "title": "ServerTasksSettings",
+ "type": "object"
+ },
+ "ServerUISettings": {
+ "properties": {
+ "enabled": {
+ "default": true,
+ "description": "Whether or not to serve the Prefect UI.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_UI_ENABLED",
+ "PREFECT_UI_ENABLED"
+ ],
+ "title": "Enabled",
+ "type": "boolean"
+ },
+ "api_url": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The connection url for communication from the UI to the API. Defaults to `PREFECT_API_URL` if set. Otherwise, the default URL is generated from `PREFECT_SERVER_API_HOST` and `PREFECT_SERVER_API_PORT`.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_UI_API_URL",
+ "PREFECT_UI_API_URL"
+ ],
+ "title": "Api Url"
+ },
+ "serve_base": {
+ "default": "/",
+ "description": "The base URL path to serve the Prefect UI from.",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_UI_SERVE_BASE",
+ "PREFECT_UI_SERVE_BASE"
+ ],
+ "title": "Serve Base",
+ "type": "string"
+ },
+ "static_directory": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The directory to serve static files from. This should be used when running into permissions issues when attempting to serve the UI from the default directory (for example when running in a Docker container).",
+ "supported_environment_variables": [
+ "PREFECT_SERVER_UI_STATIC_DIRECTORY",
+ "PREFECT_UI_STATIC_DIRECTORY"
+ ],
+ "title": "Static Directory"
+ }
+ },
+ "title": "ServerUISettings",
+ "type": "object"
+ },
+ "TasksRunnerSettings": {
+ "properties": {
+ "thread_pool_max_workers": {
+ "anyOf": [
+ {
+ "exclusiveMinimum": 0,
+ "type": "integer"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The maximum number of workers for ThreadPoolTaskRunner.",
+ "supported_environment_variables": [
+ "PREFECT_TASKS_RUNNER_THREAD_POOL_MAX_WORKERS",
+ "PREFECT_TASK_RUNNER_THREAD_POOL_MAX_WORKERS"
+ ],
+ "title": "Thread Pool Max Workers"
+ }
+ },
+ "title": "TasksRunnerSettings",
+ "type": "object"
+ },
+ "TasksSchedulingSettings": {
+ "properties": {
+ "default_storage_block": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The `block-type/block-document` slug of a block to use as the default storage for autonomous tasks.",
+ "supported_environment_variables": [
+ "PREFECT_TASKS_SCHEDULING_DEFAULT_STORAGE_BLOCK",
+ "PREFECT_TASK_SCHEDULING_DEFAULT_STORAGE_BLOCK"
+ ],
+ "title": "Default Storage Block"
+ },
+ "delete_failed_submissions": {
+ "default": true,
+ "description": "Whether or not to delete failed task submissions from the database.",
+ "supported_environment_variables": [
+ "PREFECT_TASKS_SCHEDULING_DELETE_FAILED_SUBMISSIONS",
+ "PREFECT_TASK_SCHEDULING_DELETE_FAILED_SUBMISSIONS"
+ ],
+ "title": "Delete Failed Submissions",
+ "type": "boolean"
+ }
+ },
+ "title": "TasksSchedulingSettings",
+ "type": "object"
+ },
+ "TasksSettings": {
+ "properties": {
+ "refresh_cache": {
+ "default": false,
+ "description": "If `True`, enables a refresh of cached results: re-executing the task will refresh the cached results.",
+ "supported_environment_variables": [
+ "PREFECT_TASKS_REFRESH_CACHE"
+ ],
+ "title": "Refresh Cache",
+ "type": "boolean"
+ },
+ "default_retries": {
+ "default": 0,
+ "description": "This value sets the default number of retries for all tasks.",
+ "minimum": 0,
+ "supported_environment_variables": [
+ "PREFECT_TASKS_DEFAULT_RETRIES",
+ "PREFECT_TASK_DEFAULT_RETRIES"
+ ],
+ "title": "Default Retries",
+ "type": "integer"
+ },
+ "default_retry_delay_seconds": {
+ "anyOf": [
+ {
+ "type": "integer"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "items": {
+ "type": "number"
+ },
+ "type": "array"
+ }
+ ],
+ "default": 0,
+ "description": "This value sets the default retry delay seconds for all tasks.",
+ "supported_environment_variables": [
+ "PREFECT_TASKS_DEFAULT_RETRY_DELAY_SECONDS",
+ "PREFECT_TASK_DEFAULT_RETRY_DELAY_SECONDS"
+ ],
+ "title": "Default Retry Delay Seconds"
+ },
+ "default_persist_result": {
+ "anyOf": [
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "If `True`, results will be persisted by default for all tasks. Set to `False` to disable persistence by default. Note that setting to `False` will override the behavior set by a parent flow or task.",
+ "supported_environment_variables": [
+ "PREFECT_TASKS_DEFAULT_PERSIST_RESULT"
+ ],
+ "title": "Default Persist Result"
+ },
+ "runner": {
+ "$ref": "#/$defs/TasksRunnerSettings",
+ "description": "Settings for controlling task runner behavior",
+ "supported_environment_variables": []
+ },
+ "scheduling": {
+ "$ref": "#/$defs/TasksSchedulingSettings",
+ "description": "Settings for controlling client-side task scheduling behavior",
+ "supported_environment_variables": []
+ }
+ },
+ "title": "TasksSettings",
+ "type": "object"
+ },
+ "TestingSettings": {
+ "properties": {
+ "test_mode": {
+ "default": false,
+ "description": "If `True`, places the API in test mode. This may modify behavior to facilitate testing.",
+ "supported_environment_variables": [
+ "PREFECT_TESTING_TEST_MODE",
+ "PREFECT_TEST_MODE"
+ ],
+ "title": "Test Mode",
+ "type": "boolean"
+ },
+ "unit_test_mode": {
+ "default": false,
+ "description": "This setting only exists to facilitate unit testing. If `True`, code is executing in a unit test context. Defaults to `False`.",
+ "supported_environment_variables": [
+ "PREFECT_TESTING_UNIT_TEST_MODE",
+ "PREFECT_UNIT_TEST_MODE"
+ ],
+ "title": "Unit Test Mode",
+ "type": "boolean"
+ },
+ "unit_test_loop_debug": {
+ "default": true,
+        "description": "If `True`, turns on debug mode for the unit testing event loop.",
+ "supported_environment_variables": [
+ "PREFECT_TESTING_UNIT_TEST_LOOP_DEBUG",
+ "PREFECT_UNIT_TEST_LOOP_DEBUG"
+ ],
+ "title": "Unit Test Loop Debug",
+ "type": "boolean"
+ },
+ "test_setting": {
+ "anyOf": [
+ {},
+ {
+ "type": "null"
+ }
+ ],
+ "default": "FOO",
+ "description": "This setting only exists to facilitate unit testing. If in test mode, this setting will return its value. Otherwise, it returns `None`.",
+ "supported_environment_variables": [
+ "PREFECT_TESTING_TEST_SETTING",
+ "PREFECT_TEST_SETTING"
+ ],
+ "title": "Test Setting"
+ }
+ },
+ "title": "TestingSettings",
+ "type": "object"
+ },
+ "WorkerSettings": {
+ "properties": {
+ "heartbeat_seconds": {
+ "default": 30,
+ "description": "Number of seconds a worker should wait between sending a heartbeat.",
+ "supported_environment_variables": [
+ "PREFECT_WORKER_HEARTBEAT_SECONDS"
+ ],
+ "title": "Heartbeat Seconds",
+ "type": "number"
+ },
+ "query_seconds": {
+ "default": 10,
+ "description": "Number of seconds a worker should wait between queries for scheduled work.",
+ "supported_environment_variables": [
+ "PREFECT_WORKER_QUERY_SECONDS"
+ ],
+ "title": "Query Seconds",
+ "type": "number"
+ },
+ "prefetch_seconds": {
+ "default": 10,
+ "description": "The number of seconds into the future a worker should query for scheduled work.",
+ "supported_environment_variables": [
+ "PREFECT_WORKER_PREFETCH_SECONDS"
+ ],
+ "title": "Prefetch Seconds",
+ "type": "number"
+ },
+ "webserver": {
+ "$ref": "#/$defs/WorkerWebserverSettings",
+ "description": "Settings for a worker's webserver",
+ "supported_environment_variables": []
+ }
+ },
+ "title": "WorkerSettings",
+ "type": "object"
+ },
+ "WorkerWebserverSettings": {
+ "properties": {
+ "host": {
+ "default": "0.0.0.0",
+ "description": "The host address the worker's webserver should bind to.",
+ "supported_environment_variables": [
+ "PREFECT_WORKER_WEBSERVER_HOST"
+ ],
+ "title": "Host",
+ "type": "string"
+ },
+ "port": {
+ "default": 8080,
+ "description": "The port the worker's webserver should bind to.",
+ "supported_environment_variables": [
+ "PREFECT_WORKER_WEBSERVER_PORT"
+ ],
+ "title": "Port",
+ "type": "integer"
+ }
+ },
+ "title": "WorkerWebserverSettings",
+ "type": "object"
+ }
+ },
+ "description": "Settings for Prefect using Pydantic settings.\n\nSee https://docs.pydantic.dev/latest/concepts/pydantic_settings",
+ "properties": {
+ "home": {
+ "default": "~/.prefect",
+      "description": "The path to the Prefect home directory. Defaults to `~/.prefect`.",
+ "format": "path",
+ "supported_environment_variables": [
+ "PREFECT_HOME"
+ ],
+ "title": "Home",
+ "type": "string"
+ },
+ "profiles_path": {
+ "anyOf": [
+ {
+ "format": "path",
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The path to a profiles configuration file.",
+ "supported_environment_variables": [
+ "PREFECT_PROFILES_PATH"
+ ],
+ "title": "Profiles Path"
+ },
+ "debug_mode": {
+ "default": false,
+      "description": "If `True`, enables debug mode which may provide additional logging and debugging features.",
+ "supported_environment_variables": [
+ "PREFECT_DEBUG_MODE"
+ ],
+ "title": "Debug Mode",
+ "type": "boolean"
+ },
+ "api": {
+ "$ref": "#/$defs/APISettings",
+ "supported_environment_variables": []
+ },
+ "cli": {
+ "$ref": "#/$defs/CLISettings",
+ "supported_environment_variables": []
+ },
+ "client": {
+ "$ref": "#/$defs/ClientSettings",
+ "supported_environment_variables": []
+ },
+ "cloud": {
+ "$ref": "#/$defs/CloudSettings",
+ "supported_environment_variables": []
+ },
+ "deployments": {
+ "$ref": "#/$defs/DeploymentsSettings",
+ "supported_environment_variables": []
+ },
+ "experiments": {
+ "$ref": "#/$defs/ExperimentsSettings",
+ "description": "Settings for controlling experimental features",
+ "supported_environment_variables": []
+ },
+ "flows": {
+ "$ref": "#/$defs/FlowsSettings",
+ "supported_environment_variables": []
+ },
+ "internal": {
+ "$ref": "#/$defs/InternalSettings",
+ "description": "Settings for internal Prefect machinery",
+ "supported_environment_variables": []
+ },
+ "logging": {
+ "$ref": "#/$defs/LoggingSettings",
+ "supported_environment_variables": []
+ },
+ "results": {
+ "$ref": "#/$defs/ResultsSettings",
+ "supported_environment_variables": []
+ },
+ "runner": {
+ "$ref": "#/$defs/RunnerSettings",
+ "supported_environment_variables": []
+ },
+ "server": {
+ "$ref": "#/$defs/ServerSettings",
+ "supported_environment_variables": []
+ },
+ "tasks": {
+ "$ref": "#/$defs/TasksSettings",
+ "description": "Settings for controlling task behavior",
+ "supported_environment_variables": []
+ },
+ "testing": {
+ "$ref": "#/$defs/TestingSettings",
+ "description": "Settings used during testing",
+ "supported_environment_variables": []
+ },
+ "worker": {
+ "$ref": "#/$defs/WorkerSettings",
+ "description": "Settings for controlling worker behavior",
+ "supported_environment_variables": []
+ },
+ "ui_url": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The URL of the Prefect UI. If not set, the client will attempt to infer it.",
+ "supported_environment_variables": [
+ "PREFECT_UI_URL"
+ ],
+ "title": "Ui Url"
+ },
+ "silence_api_url_misconfiguration": {
+ "default": false,
+ "description": "\n If `True`, disable the warning when a user accidentally misconfigure its `PREFECT_API_URL`\n Sometimes when a user manually set `PREFECT_API_URL` to a custom url,reverse-proxy for example,\n we would like to silence this warning so we will set it to `FALSE`.\n ",
+ "supported_environment_variables": [
+ "PREFECT_SILENCE_API_URL_MISCONFIGURATION"
+ ],
+ "title": "Silence Api Url Misconfiguration",
+ "type": "boolean"
+ }
+ },
+ "title": "Prefect Settings",
+ "type": "object",
+ "$schema": "https://json-schema.org/draft/2020-12/schema",
+ "$id": "https://github.com/PrefectHQ/prefect/schemas/settings.schema.json"
+}
\ No newline at end of file
diff --git a/scripts/generate_mintlify_openapi_docs.py b/scripts/generate_mintlify_openapi_docs.py
index c9f9afaf976f..147d986da906 100755
--- a/scripts/generate_mintlify_openapi_docs.py
+++ b/scripts/generate_mintlify_openapi_docs.py
@@ -7,7 +7,6 @@
from packaging.version import Version
-import prefect
from prefect.server.api.server import create_app
Mint = dict[str, Any]
@@ -30,10 +29,11 @@ def docs_path() -> Path:
def current_version() -> str:
"""
Return a high-level version string for the current Prefect version,
- such as "3.1" or "3.1rc".
+ such as "3" or "3.1.0rc".
"""
- version = Version(prefect.__version__)
- return f"{version.major}.{version.minor}{version.pre[0] if version.pre else ''}"
+
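+ # Hard-coded: the generated docs are pinned to the v3 docs tree, so this no
+ # longer tracks the installed prefect version.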
+ version = Version("3.0.0")
+ return f"v{version.major}{version.minor if version.pre else ''}{version.pre[0] if version.pre else ''}"
def main():
diff --git a/scripts/generate_oss_openapi_schema.py b/scripts/generate_oss_openapi_schema.py
index e453e0124635..78847e0434e7 100644
--- a/scripts/generate_oss_openapi_schema.py
+++ b/scripts/generate_oss_openapi_schema.py
@@ -1,8 +1,13 @@
+# /// script
+# dependencies = [
+# "prefect @ file:${PROJECT_ROOT}/../",
+# ]
+# ///
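+# The block above is PEP 723 inline script metadata; runners such as `uv run`
+# can use it to install prefect from the repository root before executing.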
import json
-from prefect.server.api.server import create_app
+from prefect.server.api.server import create_api_app
-app = create_app()
+app = create_api_app()
openapi_schema = app.openapi()
with open("oss_schema.json", "w") as f:
diff --git a/scripts/generate_sdk_docs.py b/scripts/generate_sdk_docs.py
index 4b14374eb9aa..4fd21871ef39 100644
--- a/scripts/generate_sdk_docs.py
+++ b/scripts/generate_sdk_docs.py
@@ -34,8 +34,6 @@ def main():
continue
modules.append(submodule)
- print(modules)
-
package_docs = docs_path() / "mkdocs"
for module in sorted(modules):
diff --git a/scripts/generate_settings_ref.py b/scripts/generate_settings_ref.py
new file mode 100644
index 000000000000..d4db8f3a4489
--- /dev/null
+++ b/scripts/generate_settings_ref.py
@@ -0,0 +1,192 @@
+from typing import Any, Dict
+
+from prefect import __development_base_path__
+from prefect.settings import Settings
+
+
+def resolve_ref(schema: Dict[Any, Any], ref_path: str) -> Dict[str, Any]:
+ """Resolve a reference to a nested model."""
+ return schema.get("$defs", {}).get(ref_path.split("/")[-1], {})
+
+
+def build_ref_paths(schema: Dict[Any, Any]) -> Dict[str, str]:
+ """Build a mapping of reference paths for all nested models."""
+ paths = {}
+ to_process = [("", "", schema)]
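+ # Breadth-first walk over $ref edges; each queue entry is
+ # (dotted path so far, model name, model schema).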
+
+ defs = schema.get("$defs", {})
+
+ while to_process:
+ current_path, current_name, current_schema = to_process.pop(0)
+
+ if "properties" in current_schema:
+ for prop_name, prop_info in current_schema["properties"].items():
+ new_path = f"{current_path}.{prop_name}" if current_path else prop_name
+
+ if "$ref" in prop_info:
+ ref_name = prop_info["$ref"].split("/")[-1]
+ paths[ref_name] = new_path
+ if ref_name in defs:
+ to_process.append((new_path, ref_name, defs[ref_name]))
+
+ return paths
+
+
+def process_property_constraints(prop_info: Dict[Any, Any]) -> list[str]:
+ """Extract constraints from a property's schema information."""
+ constraints = []
+
+ # Handle basic constraints
+ for constraint in ["minimum", "maximum", "pattern", "enum"]:
+ if constraint in prop_info:
+ if constraint == "enum":
+ constraints.append(
+ f"Allowed values: {', '.join(repr(v) for v in prop_info[constraint])}"
+ )
+ else:
+ constraints.append(
+ f"{constraint.capitalize()}: {prop_info[constraint]}"
+ )
+
+ return constraints
+
+
+def generate_property_docs(
+ prop_name: str, prop_info: Dict[Any, Any], level: int = 3, parent_path: str = ""
+) -> str:
+ """Generate documentation for a single property."""
+ docs = []
+ header = "#" * level
+ docs.append(f"{header} `{prop_name}`")
+
+ # Description
+ if "description" in prop_info:
+ docs.append(f"{prop_info['description']}")
+
+ # Type information
+ if "$ref" in prop_info:
+ ref_name = prop_info["$ref"].split("/")[-1]
+ docs.append(f"\n**Type**: [{ref_name}](#{ref_name.lower()})")
+ elif "type" in prop_info:
+ prop_type = prop_info["type"]
+ docs.append(f"\n**Type**: `{prop_type}`")
+ elif "anyOf" in prop_info:
+ # Handle complex type constraints
+ types = []
+ for type_info in prop_info["anyOf"]:
+ if "type" in type_info:
+ if type_info["type"] == "null":
+ types.append("None")
+ else:
+ types.append(type_info["type"])
+ if types:
+ docs.append(f"\n**Type**: `{' | '.join(types)}`")
+ else:
+ docs.append("\n**Type**: `any`")
+ else:
+ docs.append("\n**Type**: `any`")
+
+ # Default value
+ if "default" in prop_info:
+ docs.append(f"\n**Default**: `{prop_info['default']}`")
+
+ # Constraints
+ constraints = process_property_constraints(prop_info)
+ if constraints:
+ docs.append("\n**Constraints**:")
+ for constraint in constraints:
+ docs.append(f"- {constraint}")
+
+ # Access path
+ access_path = f"{parent_path}.{prop_name}" if parent_path else prop_name
+ docs.append(f"\n**TOML dotted key path**: `{access_path}`")
+
+ if supported_env_vars := prop_info.get("supported_environment_variables"):
+ docs.append("\n**Supported environment variables**:")
+ docs.append(", ".join(f"`{env_var}`" for env_var in supported_env_vars))
+
+ return "\n".join(docs) + "\n"
+
+
+def generate_model_docs(
+ schema: Dict[Any, Any], level: int = 1, parent_path: str = ""
+) -> str:
+ """Generate documentation for a model and its properties."""
+ docs = []
+ header = "#" * level
+
+ if not schema.get("properties"):
+ return ""
+
+ # Model title and description
+ title = schema.get("title", "Settings")
+ docs.append(f"{header} {title}")
+
+ if "description" in schema:
+ docs.append(f"{schema['description']}")
+
+ # Process all properties
+ if "properties" in schema:
+ for prop_name, prop_info in schema["properties"].items():
+ docs.append(
+ generate_property_docs(
+ prop_name, prop_info, level=level + 1, parent_path=parent_path
+ )
+ )
+
+ docs.append("---")
+
+ return "\n".join(docs)
+
+
+def process_definitions(defs: Dict[Any, Any], schema: Dict[Any, Any]) -> str:
+ """Process all model definitions and generate their documentation."""
+ docs = []
+
+ # Build complete reference paths
+ ref_paths = build_ref_paths(schema)
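+ # Maps each nested model to its dotted TOML prefix,
+ # e.g. "ServerAPISettings" -> "server.api".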
+
+ docs.append("---")
+ for model_name, model_schema in defs.items():
+ parent_path = ref_paths.get(model_name, "")
+ docs.append(generate_model_docs(model_schema, level=2, parent_path=parent_path))
+
+ return "\n".join(docs)
+
+
+def main():
+ schema = Settings.model_json_schema()
+ # Generate main documentation
+ docs_content = [
+ "---",
+ "title: Settings reference",
+ "description: Reference for all available settings for Prefect.",
+ "---",
+ "{/* This page is generated by `scripts/generate_settings_ref.py`. Update the generation script to update this page. */}",
+ "To use `prefect.toml` or `pyproject.toml` for configuration, `prefect>=3.1` must be installed. ",
+ "## Root Settings",
+ ]
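+ # The leading "---" block is MDX front matter and `{/* ... */}` is an MDX
+ # comment, so neither renders as page content.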
+
+ # Generate documentation for top-level properties
+ if "properties" in schema:
+ for prop_name, prop_info in schema["properties"].items():
+ if "$ref" in prop_info and not resolve_ref(schema, prop_info["$ref"]).get(
+ "properties"
+ ):
+ # Exclude nested models with no properties (like `experiments` sometimes)
+ continue
+ docs_content.append(generate_property_docs(prop_name, prop_info, level=3))
+
+ # Generate documentation for nested models
+ if "$defs" in schema:
+ docs_content.append(process_definitions(schema["$defs"], schema))
+
+ with open(
+ __development_base_path__ / "docs" / "v3" / "develop" / "settings-ref.mdx",
+ "w",
+ ) as f:
+ f.write("\n".join(docs_content))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/scripts/generate_settings_schema.py b/scripts/generate_settings_schema.py
new file mode 100644
index 000000000000..cb366fb1df3b
--- /dev/null
+++ b/scripts/generate_settings_schema.py
@@ -0,0 +1,30 @@
+import json
+
+from pydantic.json_schema import GenerateJsonSchema
+
+from prefect import __development_base_path__
+from prefect.settings import Settings
+
+
+class SettingsGenerateJsonSchema(GenerateJsonSchema):
+ def generate(self, schema, mode="validation"):
+ json_schema = super().generate(schema, mode=mode)
+ json_schema["title"] = "Prefect Settings"
+ json_schema["$schema"] = self.schema_dialect
+ json_schema[
+ "$id"
+ ] = "https://github.com/PrefectHQ/prefect/schemas/settings.schema.json"
+ return json_schema
+
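+# Stamping `$schema` and `$id` onto the output identifies the JSON Schema
+# dialect and gives the published schema a stable canonical URI.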
+
+def main():
+ with open(__development_base_path__ / "schemas" / "settings.schema.json", "w") as f:
+ json.dump(
+ Settings.model_json_schema(schema_generator=SettingsGenerateJsonSchema),
+ f,
+ indent=4,
+ )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/scripts/test_unc_paths.py b/scripts/test_unc_paths.py
new file mode 100644
index 000000000000..31cb6f058e56
--- /dev/null
+++ b/scripts/test_unc_paths.py
@@ -0,0 +1,66 @@
+import os
+import sys
+from pathlib import Path
+
+from prefect import flow
+from prefect.flows import Flow
+
+
+def setup_unc_share(base_path: Path) -> Path:
+ """
+ Creates a test UNC path and returns it.
+ Requires admin privileges on Windows.
+ """
+ if os.name != "nt":
+ print("This script only works on Windows")
+ sys.exit(1)
+
+ # Create a test directory structure in the share
+ unc_path = Path(r"\\localhost\PrefectTest")
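+ # NOTE: assumes a share named "PrefectTest" already exists on this host, e.g.
+ # created from an elevated prompt with: net share PrefectTest=C:\Temp\PrefectTest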
+
+ # Create a src directory in the share for our test flow
+ src_dir = unc_path / "src"
+ src_dir.mkdir(parents=True, exist_ok=True)
+
+ return unc_path
+
+
+def create_test_flow_file(path: Path):
+ """Create a test flow file in the given path"""
+ flow_code = """
+from prefect import flow
+
+@flow
+def remote_test_flow(name: str = "remote"):
+ print(f"Hello from {name} in remote flow!")
+ return "Remote Success!"
+"""
+ # Create the flow file in src/app.py
+ flow_file = path / "src" / "app.py"
+ flow_file.write_text(flow_code)
+ return flow_file
+
+
+if __name__ == "__main__":
+ try:
+ # Setup UNC share
+ unc_path = setup_unc_share(Path.cwd())
+ print(f"Created UNC path structure at: {unc_path}")
+
+ # Create a test flow file in the share
+ flow_file = create_test_flow_file(unc_path)
+ print(f"Created test flow file at: {flow_file}")
+
+ # Try to load and run flow from UNC path
+ print("Attempting to load flow from UNC path...")
+ remote_flow = flow.from_source(
+ source=unc_path, entrypoint="src/app.py:remote_test_flow"
+ )
+
+ print("Testing if flow was loaded correctly...")
+ assert isinstance(remote_flow, Flow), "Flow was not loaded correctly"
+ print("Flow loaded successfully!")
+
+ except Exception as e:
+ print(f"Error: {type(e).__name__}: {e}")
+ raise
diff --git a/settings_schema.json b/settings_schema.json
new file mode 100644
index 000000000000..392929ddd0b2
--- /dev/null
+++ b/settings_schema.json
@@ -0,0 +1,1645 @@
+{
+ "$defs": {
+ "APISettings": {
+ "description": "Settings for interacting with the Prefect API",
+ "properties": {
+ "url": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The URL of the Prefect API. If not set, the client will attempt to infer it.",
+ "title": "Url"
+ },
+ "key": {
+ "anyOf": [
+ {
+ "format": "password",
+ "type": "string",
+ "writeOnly": true
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The API key used for authentication with the Prefect API. Should be kept secret.",
+ "title": "Key"
+ },
+ "tls_insecure_skip_verify": {
+ "default": false,
+ "description": "If `True`, disables SSL checking to allow insecure requests. This is recommended only during development, e.g. when using self-signed certificates.",
+ "title": "Tls Insecure Skip Verify",
+ "type": "boolean"
+ },
+ "ssl_cert_file": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "This configuration settings option specifies the path to an SSL certificate file.",
+ "title": "Ssl Cert File"
+ },
+ "enable_http2": {
+ "default": false,
+ "description": "If true, enable support for HTTP/2 for communicating with an API. If the API does not support HTTP/2, this will have no effect and connections will be made via HTTP/1.1.",
+ "title": "Enable Http2",
+ "type": "boolean"
+ },
+ "request_timeout": {
+ "default": 60.0,
+ "description": "The default timeout for requests to the API",
+ "title": "Request Timeout",
+ "type": "number"
+ }
+ },
+ "title": "APISettings",
+ "type": "object"
+ },
+ "CLISettings": {
+ "description": "Settings for controlling CLI behavior",
+ "properties": {
+ "colors": {
+ "default": true,
+ "description": "If True, use colors in CLI output. If `False`, output will not include colors codes.",
+ "title": "Colors",
+ "type": "boolean"
+ },
+ "prompt": {
+ "anyOf": [
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "If `True`, use interactive prompts in CLI commands. If `False`, no interactive prompts will be used. If `None`, the value will be dynamically determined based on the presence of an interactive-enabled terminal.",
+ "title": "Prompt"
+ },
+ "wrap_lines": {
+ "default": true,
+ "description": "If `True`, wrap text by inserting new lines in long lines in CLI output. If `False`, output will not be wrapped.",
+ "title": "Wrap Lines",
+ "type": "boolean"
+ }
+ },
+ "title": "CLISettings",
+ "type": "object"
+ },
+ "ClientMetricsSettings": {
+ "description": "Settings for controlling metrics reporting from the client",
+ "properties": {
+ "enabled": {
+ "default": false,
+ "description": "Whether or not to enable Prometheus metrics in the client.",
+ "title": "Enabled",
+ "type": "boolean"
+ },
+ "port": {
+ "default": 4201,
+ "description": "The port to expose the client Prometheus metrics on.",
+ "title": "Port",
+ "type": "integer"
+ }
+ },
+ "title": "ClientMetricsSettings",
+ "type": "object"
+ },
+ "ClientSettings": {
+ "description": "Settings for controlling API client behavior",
+ "properties": {
+ "max_retries": {
+ "default": 5,
+ "description": "\n The maximum number of retries to perform on failed HTTP requests.\n Defaults to 5. Set to 0 to disable retries.\n See `PREFECT_CLIENT_RETRY_EXTRA_CODES` for details on which HTTP status codes are\n retried.\n ",
+ "minimum": 0,
+ "title": "Max Retries",
+ "type": "integer"
+ },
+ "retry_jitter_factor": {
+ "default": 0.2,
+ "description": "\n A value greater than or equal to zero to control the amount of jitter added to retried\n client requests. Higher values introduce larger amounts of jitter.\n Set to 0 to disable jitter. See `clamped_poisson_interval` for details on the how jitter\n can affect retry lengths.\n ",
+ "minimum": 0.0,
+ "title": "Retry Jitter Factor",
+ "type": "number"
+ },
+ "retry_extra_codes": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "maximum": 599,
+ "minimum": 100,
+ "type": "integer"
+ },
+ {
+ "items": {
+ "maximum": 599,
+ "minimum": 100,
+ "type": "integer"
+ },
+ "type": "array",
+ "uniqueItems": true
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "description": "\n A list of extra HTTP status codes to retry on. Defaults to an empty list.\n 429, 502 and 503 are always retried. Please note that not all routes are idempotent and retrying\n may result in unexpected behavior.\n ",
+ "examples": [
+ "404,429,503",
+ "429",
+ [
+ 404,
+ 429,
+ 503
+ ]
+ ],
+ "title": "Retry Extra Codes"
+ },
+ "csrf_support_enabled": {
+ "default": true,
+ "description": "\n Determines if CSRF token handling is active in the Prefect client for API\n requests.\n\n When enabled (`True`), the client automatically manages CSRF tokens by\n retrieving, storing, and including them in applicable state-changing requests\n ",
+ "title": "Csrf Support Enabled",
+ "type": "boolean"
+ },
+ "metrics": {
+ "$ref": "#/$defs/ClientMetricsSettings"
+ }
+ },
+ "title": "ClientSettings",
+ "type": "object"
+ },
+ "CloudSettings": {
+ "description": "Settings for interacting with Prefect Cloud",
+ "properties": {
+ "api_url": {
+ "default": "https://api.prefect.cloud/api",
+ "description": "API URL for Prefect Cloud. Used for authentication with Prefect Cloud.",
+ "title": "Api Url",
+ "type": "string"
+ },
+ "ui_url": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The URL of the Prefect Cloud UI. If not set, the client will attempt to infer it.",
+ "title": "Ui Url"
+ }
+ },
+ "title": "CloudSettings",
+ "type": "object"
+ },
+ "DeploymentsSettings": {
+ "description": "Settings for configuring deployments defaults",
+ "properties": {
+ "default_work_pool_name": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The default work pool to use when creating deployments.",
+ "title": "Default Work Pool Name"
+ },
+ "default_docker_build_namespace": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The default Docker namespace to use when building images.",
+ "examples": [
+ "my-dockerhub-registry",
+ "4999999999999.dkr.ecr.us-east-2.amazonaws.com/my-ecr-repo"
+ ],
+ "title": "Default Docker Build Namespace"
+ }
+ },
+ "title": "DeploymentsSettings",
+ "type": "object"
+ },
+ "FlowsSettings": {
+ "description": "Settings for controlling flow behavior",
+ "properties": {
+ "default_retries": {
+ "default": 0,
+ "description": "This value sets the default number of retries for all flows.",
+ "minimum": 0,
+ "title": "Default Retries",
+ "type": "integer"
+ },
+ "default_retry_delay_seconds": {
+ "anyOf": [
+ {
+ "type": "integer"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "items": {
+ "type": "number"
+ },
+ "type": "array"
+ }
+ ],
+ "default": 0,
+ "description": "This value sets the default retry delay seconds for all flows.",
+ "title": "Default Retry Delay Seconds"
+ }
+ },
+ "title": "FlowsSettings",
+ "type": "object"
+ },
+ "InternalSettings": {
+ "properties": {
+ "logging_level": {
+ "default": "ERROR",
+ "description": "The default logging level for Prefect's internal machinery loggers.",
+ "enum": [
+ "DEBUG",
+ "INFO",
+ "WARNING",
+ "ERROR",
+ "CRITICAL"
+ ],
+ "title": "Logging Level",
+ "type": "string"
+ }
+ },
+ "title": "InternalSettings",
+ "type": "object"
+ },
+ "LoggingSettings": {
+ "description": "Settings for controlling logging behavior",
+ "properties": {
+ "level": {
+ "default": "INFO",
+ "description": "The default logging level for Prefect loggers.",
+ "enum": [
+ "DEBUG",
+ "INFO",
+ "WARNING",
+ "ERROR",
+ "CRITICAL"
+ ],
+ "title": "Level",
+ "type": "string"
+ },
+ "config_path": {
+ "anyOf": [
+ {
+ "format": "path",
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The path to a custom YAML logging configuration file.",
+ "title": "Config Path"
+ },
+ "extra_loggers": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "Additional loggers to attach to Prefect logging at runtime.",
+ "title": "Extra Loggers"
+ },
+ "log_prints": {
+ "default": false,
+ "description": "If `True`, `print` statements in flows and tasks will be redirected to the Prefect logger for the given run.",
+ "title": "Log Prints",
+ "type": "boolean"
+ },
+ "colors": {
+ "default": true,
+ "description": "If `True`, use colors in CLI output. If `False`, output will not include colors codes.",
+ "title": "Colors",
+ "type": "boolean"
+ },
+ "markup": {
+ "default": false,
+ "description": "\n Whether to interpret strings wrapped in square brackets as a style.\n This allows styles to be conveniently added to log messages, e.g.\n `[red]This is a red message.[/red]`. However, the downside is, if enabled,\n strings that contain square brackets may be inaccurately interpreted and\n lead to incomplete output, e.g.\n `[red]This is a red message.[/red]` may be interpreted as\n `[red]This is a red message.[/red]`.\n ",
+ "title": "Markup",
+ "type": "boolean"
+ },
+ "to_api": {
+ "$ref": "#/$defs/LoggingToAPISettings"
+ }
+ },
+ "title": "LoggingSettings",
+ "type": "object"
+ },
+ "LoggingToAPISettings": {
+ "description": "Settings for controlling logging to the API",
+ "properties": {
+ "enabled": {
+ "default": true,
+ "description": "If `True`, logs will be sent to the API.",
+ "title": "Enabled",
+ "type": "boolean"
+ },
+ "batch_interval": {
+ "default": 2.0,
+ "description": "The number of seconds between batched writes of logs to the API.",
+ "title": "Batch Interval",
+ "type": "number"
+ },
+ "batch_size": {
+ "default": 4000000,
+ "description": "The number of logs to batch before sending to the API.",
+ "title": "Batch Size",
+ "type": "integer"
+ },
+ "max_log_size": {
+ "default": 1000000,
+ "description": "The maximum size in bytes for a single log.",
+ "title": "Max Log Size",
+ "type": "integer"
+ },
+ "when_missing_flow": {
+ "default": "warn",
+ "description": "\n Controls the behavior when loggers attempt to send logs to the API handler from outside of a flow.\n \n All logs sent to the API must be associated with a flow run. The API log handler can\n only be used outside of a flow by manually providing a flow run identifier. Logs\n that are not associated with a flow run will not be sent to the API. This setting can\n be used to determine if a warning or error is displayed when the identifier is missing.\n\n The following options are available:\n\n - \"warn\": Log a warning message.\n - \"error\": Raise an error.\n - \"ignore\": Do not log a warning message or raise an error.\n ",
+ "enum": [
+ "warn",
+ "error",
+ "ignore"
+ ],
+ "title": "When Missing Flow",
+ "type": "string"
+ }
+ },
+ "title": "LoggingToAPISettings",
+ "type": "object"
+ },
+ "ResultsSettings": {
+ "description": "Settings for controlling result storage behavior",
+ "properties": {
+ "default_serializer": {
+ "default": "pickle",
+ "description": "The default serializer to use when not otherwise specified.",
+ "title": "Default Serializer",
+ "type": "string"
+ },
+ "persist_by_default": {
+ "default": false,
+ "description": "The default setting for persisting results when not otherwise specified.",
+ "title": "Persist By Default",
+ "type": "boolean"
+ },
+ "default_storage_block": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The `block-type/block-document` slug of a block to use as the default result storage.",
+ "title": "Default Storage Block"
+ },
+ "local_storage_path": {
+ "anyOf": [
+ {
+ "format": "path",
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The path to a directory to store results in.",
+ "title": "Local Storage Path"
+ }
+ },
+ "title": "ResultsSettings",
+ "type": "object"
+ },
+ "RunnerServerSettings": {
+ "description": "Settings for controlling runner server behavior",
+ "properties": {
+ "enable": {
+ "default": false,
+ "description": "Whether or not to enable the runner's webserver.",
+ "title": "Enable",
+ "type": "boolean"
+ },
+ "host": {
+ "default": "localhost",
+ "description": "The host address the runner's webserver should bind to.",
+ "title": "Host",
+ "type": "string"
+ },
+ "port": {
+ "default": 8080,
+ "description": "The port the runner's webserver should bind to.",
+ "title": "Port",
+ "type": "integer"
+ },
+ "log_level": {
+ "default": "error",
+ "description": "The log level of the runner's webserver.",
+ "enum": [
+ "DEBUG",
+ "INFO",
+ "WARNING",
+ "ERROR",
+ "CRITICAL"
+ ],
+ "title": "Log Level",
+ "type": "string"
+ },
+ "missed_polls_tolerance": {
+ "default": 2,
+ "description": "Number of missed polls before a runner is considered unhealthy by its webserver.",
+ "title": "Missed Polls Tolerance",
+ "type": "integer"
+ }
+ },
+ "title": "RunnerServerSettings",
+ "type": "object"
+ },
+ "RunnerSettings": {
+ "description": "Settings for controlling runner behavior",
+ "properties": {
+ "process_limit": {
+ "default": 5,
+ "description": "Maximum number of processes a runner will execute in parallel.",
+ "title": "Process Limit",
+ "type": "integer"
+ },
+ "poll_frequency": {
+ "default": 10,
+ "description": "Number of seconds a runner should wait between queries for scheduled work.",
+ "title": "Poll Frequency",
+ "type": "integer"
+ },
+ "server": {
+ "$ref": "#/$defs/RunnerServerSettings"
+ }
+ },
+ "title": "RunnerSettings",
+ "type": "object"
+ },
+ "ServerAPISettings": {
+ "description": "Settings for controlling API server behavior",
+ "properties": {
+ "host": {
+ "default": "127.0.0.1",
+ "description": "The API's host address (defaults to `127.0.0.1`).",
+ "title": "Host",
+ "type": "string"
+ },
+ "port": {
+ "default": 4200,
+ "description": "The API's port address (defaults to `4200`).",
+ "title": "Port",
+ "type": "integer"
+ },
+ "default_limit": {
+ "default": 200,
+ "description": "The default limit applied to queries that can return multiple objects, such as `POST /flow_runs/filter`.",
+ "title": "Default Limit",
+ "type": "integer"
+ },
+ "keepalive_timeout": {
+ "default": 5,
+ "description": "\n The API's keep alive timeout (defaults to `5`).\n Refer to https://www.uvicorn.org/settings/#timeouts for details.\n\n When the API is hosted behind a load balancer, you may want to set this to a value\n greater than the load balancer's idle timeout.\n\n Note this setting only applies when calling `prefect server start`; if hosting the\n API with another tool you will need to configure this there instead.\n ",
+ "title": "Keepalive Timeout",
+ "type": "integer"
+ },
+ "csrf_protection_enabled": {
+ "default": false,
+ "description": "\n Controls the activation of CSRF protection for the Prefect server API.\n\n When enabled (`True`), the server enforces CSRF validation checks on incoming\n state-changing requests (POST, PUT, PATCH, DELETE), requiring a valid CSRF\n token to be included in the request headers or body. This adds a layer of\n security by preventing unauthorized or malicious sites from making requests on\n behalf of authenticated users.\n\n It is recommended to enable this setting in production environments where the\n API is exposed to web clients to safeguard against CSRF attacks.\n\n Note: Enabling this setting requires corresponding support in the client for\n CSRF token management. See PREFECT_CLIENT_CSRF_SUPPORT_ENABLED for more.\n ",
+ "title": "Csrf Protection Enabled",
+ "type": "boolean"
+ },
+ "csrf_token_expiration": {
+ "default": "PT1H",
+ "description": "\n Specifies the duration for which a CSRF token remains valid after being issued\n by the server.\n\n The default expiration time is set to 1 hour, which offers a reasonable\n compromise. Adjust this setting based on your specific security requirements\n and usage patterns.\n ",
+ "format": "duration",
+ "title": "Csrf Token Expiration",
+ "type": "string"
+ },
+ "cors_allowed_origins": {
+ "default": "*",
+ "description": "\n A comma-separated list of origins that are authorized to make cross-origin requests to the API.\n\n By default, this is set to `*`, which allows requests from all origins.\n ",
+ "title": "Cors Allowed Origins",
+ "type": "string"
+ },
+ "cors_allowed_methods": {
+ "default": "*",
+ "description": "\n A comma-separated list of methods that are authorized to make cross-origin requests to the API.\n\n By default, this is set to `*`, which allows requests from all methods.\n ",
+ "title": "Cors Allowed Methods",
+ "type": "string"
+ },
+ "cors_allowed_headers": {
+ "default": "*",
+ "description": "\n A comma-separated list of headers that are authorized to make cross-origin requests to the API.\n\n By default, this is set to `*`, which allows requests from all headers.\n ",
+ "title": "Cors Allowed Headers",
+ "type": "string"
+ }
+ },
+ "title": "ServerAPISettings",
+ "type": "object"
+ },
+ "ServerDatabaseSettings": {
+ "description": "Settings for controlling server database behavior",
+ "properties": {
+ "connection_url": {
+ "anyOf": [
+ {
+ "format": "password",
+ "type": "string",
+ "writeOnly": true
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "\n A database connection URL in a SQLAlchemy-compatible\n format. Prefect currently supports SQLite and Postgres. Note that all\n Prefect database engines must use an async driver - for SQLite, use\n `sqlite+aiosqlite` and for Postgres use `postgresql+asyncpg`.\n\n SQLite in-memory databases can be used by providing the url\n `sqlite+aiosqlite:///file::memory:?cache=shared&uri=true&check_same_thread=false`,\n which will allow the database to be accessed by multiple threads. Note\n that in-memory databases can not be accessed from multiple processes and\n should only be used for simple tests.\n ",
+ "title": "Connection Url"
+ },
+ "driver": {
+ "anyOf": [
+ {
+ "enum": [
+ "postgresql+asyncpg",
+ "sqlite+aiosqlite"
+ ],
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The database driver to use when connecting to the database. If not set, the driver will be inferred from the connection URL.",
+ "title": "Driver"
+ },
+ "host": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The database server host.",
+ "title": "Host"
+ },
+ "port": {
+ "anyOf": [
+ {
+ "type": "integer"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The database server port.",
+ "title": "Port"
+ },
+ "user": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The user to use when connecting to the database.",
+ "title": "User"
+ },
+ "name": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The name of the Prefect database on the remote server, or the path to the database file for SQLite.",
+ "title": "Name"
+ },
+ "password": {
+ "anyOf": [
+ {
+ "format": "password",
+ "type": "string",
+ "writeOnly": true
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The password to use when connecting to the database. Should be kept secret.",
+ "title": "Password"
+ },
+ "echo": {
+ "default": false,
+ "description": "If `True`, SQLAlchemy will log all SQL issued to the database. Defaults to `False`.",
+ "title": "Echo",
+ "type": "boolean"
+ },
+ "migrate_on_start": {
+ "default": true,
+ "description": "If `True`, the database will be migrated on application startup.",
+ "title": "Migrate On Start",
+ "type": "boolean"
+ },
+ "timeout": {
+ "anyOf": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": 10.0,
+ "description": "A statement timeout, in seconds, applied to all database interactions made by the API. Defaults to 10 seconds.",
+ "title": "Timeout"
+ },
+ "connection_timeout": {
+ "anyOf": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": 5,
+ "description": "A connection timeout, in seconds, applied to database connections. Defaults to `5`.",
+ "title": "Connection Timeout"
+ },
+ "sqlalchemy_pool_size": {
+ "anyOf": [
+ {
+ "type": "integer"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "Controls connection pool size when using a PostgreSQL database with the Prefect API. If not set, the default SQLAlchemy pool size will be used.",
+ "title": "Sqlalchemy Pool Size"
+ },
+ "sqlalchemy_max_overflow": {
+ "anyOf": [
+ {
+ "type": "integer"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "Controls maximum overflow of the connection pool when using a PostgreSQL database with the Prefect API. If not set, the default SQLAlchemy maximum overflow value will be used.",
+ "title": "Sqlalchemy Max Overflow"
+ }
+ },
+ "title": "ServerDatabaseSettings",
+ "type": "object"
+ },
+ "ServerDeploymentsSettings": {
+ "properties": {
+ "concurrency_slot_wait_seconds": {
+ "default": 30.0,
+ "description": "The number of seconds to wait before retrying when a deployment flow run cannot secure a concurrency slot from the server.",
+ "minimum": 0.0,
+ "title": "Concurrency Slot Wait Seconds",
+ "type": "number"
+ }
+ },
+ "title": "ServerDeploymentsSettings",
+ "type": "object"
+ },
+ "ServerEphemeralSettings": {
+ "description": "Settings for controlling ephemeral server behavior",
+ "properties": {
+ "enabled": {
+ "default": false,
+ "description": "\n Controls whether or not a subprocess server can be started when no API URL is provided.\n ",
+ "title": "Enabled",
+ "type": "boolean"
+ },
+ "startup_timeout_seconds": {
+ "default": 20,
+ "description": "\n The number of seconds to wait for the server to start when ephemeral mode is enabled.\n Defaults to `10`.\n ",
+ "title": "Startup Timeout Seconds",
+ "type": "integer"
+ }
+ },
+ "title": "ServerEphemeralSettings",
+ "type": "object"
+ },
+ "ServerEventsSettings": {
+ "description": "Settings for controlling behavior of the events subsystem",
+ "properties": {
+ "stream_out_enabled": {
+ "default": true,
+ "description": "Whether or not to stream events out to the API via websockets.",
+ "title": "Stream Out Enabled",
+ "type": "boolean"
+ },
+ "related_resource_cache_ttl": {
+ "default": "PT5M",
+ "description": "The number of seconds to cache related resources for in the API.",
+ "format": "duration",
+ "title": "Related Resource Cache Ttl",
+ "type": "string"
+ },
+ "maximum_labels_per_resource": {
+ "default": 500,
+ "description": "The maximum number of labels a resource may have.",
+ "title": "Maximum Labels Per Resource",
+ "type": "integer"
+ },
+ "maximum_related_resources": {
+ "default": 500,
+ "description": "The maximum number of related resources an Event may have.",
+ "title": "Maximum Related Resources",
+ "type": "integer"
+ },
+ "maximum_size_bytes": {
+ "default": 1500000,
+ "description": "The maximum size of an Event when serialized to JSON",
+ "title": "Maximum Size Bytes",
+ "type": "integer"
+ },
+ "expired_bucket_buffer": {
+ "default": "PT1M",
+ "description": "The amount of time to retain expired automation buckets",
+ "format": "duration",
+ "title": "Expired Bucket Buffer",
+ "type": "string"
+ },
+ "proactive_granularity": {
+ "default": "PT5S",
+ "description": "How frequently proactive automations are evaluated",
+ "format": "duration",
+ "title": "Proactive Granularity",
+ "type": "string"
+ },
+ "retention_period": {
+ "default": "P7D",
+ "description": "The amount of time to retain events in the database.",
+ "format": "duration",
+ "title": "Retention Period",
+ "type": "string"
+ },
+ "maximum_websocket_backfill": {
+ "default": "PT15M",
+ "description": "The maximum range to look back for backfilling events for a websocket subscriber.",
+ "format": "duration",
+ "title": "Maximum Websocket Backfill",
+ "type": "string"
+ },
+ "websocket_backfill_page_size": {
+ "default": 250,
+ "description": "The page size for the queries to backfill events for websocket subscribers.",
+ "exclusiveMinimum": 0,
+ "title": "Websocket Backfill Page Size",
+ "type": "integer"
+ },
+ "messaging_broker": {
+ "default": "prefect.server.utilities.messaging.memory",
+ "description": "Which message broker implementation to use for the messaging system, should point to a module that exports a Publisher and Consumer class.",
+ "title": "Messaging Broker",
+ "type": "string"
+ },
+ "messaging_cache": {
+ "default": "prefect.server.utilities.messaging.memory",
+ "description": "Which cache implementation to use for the events system. Should point to a module that exports a Cache class.",
+ "title": "Messaging Cache",
+ "type": "string"
+ }
+ },
+ "title": "ServerEventsSettings",
+ "type": "object"
+ },
+ "ServerFlowRunGraphSettings": {
+ "description": "Settings for controlling behavior of the flow run graph",
+ "properties": {
+ "max_nodes": {
+ "default": 10000,
+ "description": "The maximum size of a flow run graph on the v2 API",
+ "title": "Max Nodes",
+ "type": "integer"
+ },
+ "max_artifacts": {
+ "default": 10000,
+ "description": "The maximum number of artifacts to show on a flow run graph on the v2 API",
+ "title": "Max Artifacts",
+ "type": "integer"
+ }
+ },
+ "title": "ServerFlowRunGraphSettings",
+ "type": "object"
+ },
+ "ServerServicesCancellationCleanupSettings": {
+ "description": "Settings for controlling the cancellation cleanup service",
+ "properties": {
+ "enabled": {
+ "default": true,
+ "description": "Whether or not to start the cancellation cleanup service in the server application.",
+ "title": "Enabled",
+ "type": "boolean"
+ },
+ "loop_seconds": {
+ "default": 20,
+ "description": "The cancellation cleanup service will look for non-terminal tasks and subflows this often. Defaults to `20`.",
+ "title": "Loop Seconds",
+ "type": "number"
+ }
+ },
+ "title": "ServerServicesCancellationCleanupSettings",
+ "type": "object"
+ },
+ "ServerServicesEventPersisterSettings": {
+ "description": "Settings for controlling the event persister service",
+ "properties": {
+ "enabled": {
+ "default": true,
+ "description": "Whether or not to start the event persister service in the server application.",
+ "title": "Enabled",
+ "type": "boolean"
+ },
+ "batch_size": {
+ "default": 20,
+ "description": "The number of events the event persister will attempt to insert in one batch.",
+ "exclusiveMinimum": 0,
+ "title": "Batch Size",
+ "type": "integer"
+ },
+ "flush_interval": {
+ "default": 5,
+ "description": "The maximum number of seconds between flushes of the event persister.",
+ "exclusiveMinimum": 0.0,
+ "title": "Flush Interval",
+ "type": "number"
+ }
+ },
+ "title": "ServerServicesEventPersisterSettings",
+ "type": "object"
+ },
+ "ServerServicesFlowRunNotificationsSettings": {
+ "description": "Settings for controlling the flow run notifications service",
+ "properties": {
+ "enabled": {
+ "default": true,
+ "description": "Whether or not to start the flow run notifications service in the server application.",
+ "title": "Enabled",
+ "type": "boolean"
+ }
+ },
+ "title": "ServerServicesFlowRunNotificationsSettings",
+ "type": "object"
+ },
+ "ServerServicesForemanSettings": {
+ "description": "Settings for controlling the foreman service",
+ "properties": {
+ "enabled": {
+ "default": true,
+ "description": "Whether or not to start the foreman service in the server application.",
+ "title": "Enabled",
+ "type": "boolean"
+ },
+ "loop_seconds": {
+ "default": 15,
+ "description": "The foreman service will check for offline workers this often. Defaults to `15`.",
+ "title": "Loop Seconds",
+ "type": "number"
+ },
+ "inactivity_heartbeat_multiple": {
+ "default": 3,
+ "description": "\n The number of heartbeats that must be missed before a worker is marked as offline. Defaults to `3`.\n ",
+ "title": "Inactivity Heartbeat Multiple",
+ "type": "integer"
+ },
+ "fallback_heartbeat_interval_seconds": {
+ "default": 30,
+ "description": "\n The number of seconds to use for online/offline evaluation if a worker's heartbeat\n interval is not set. Defaults to `30`.\n ",
+ "title": "Fallback Heartbeat Interval Seconds",
+ "type": "integer"
+ },
+ "deployment_last_polled_timeout_seconds": {
+ "default": 60,
+ "description": "\n The number of seconds before a deployment is marked as not ready if it has not been\n polled. Defaults to `60`.\n ",
+ "title": "Deployment Last Polled Timeout Seconds",
+ "type": "integer"
+ },
+ "work_queue_last_polled_timeout_seconds": {
+ "default": 60,
+ "description": "\n The number of seconds before a work queue is marked as not ready if it has not been\n polled. Defaults to `60`.\n ",
+ "title": "Work Queue Last Polled Timeout Seconds",
+ "type": "integer"
+ }
+ },
+ "title": "ServerServicesForemanSettings",
+ "type": "object"
+ },
+ "ServerServicesLateRunsSettings": {
+ "description": "Settings for controlling the late runs service",
+ "properties": {
+ "enabled": {
+ "default": true,
+ "description": "Whether or not to start the late runs service in the server application.",
+ "title": "Enabled",
+ "type": "boolean"
+ },
+ "loop_seconds": {
+ "default": 5,
+ "description": "\n The late runs service will look for runs to mark as late this often. Defaults to `5`.\n ",
+ "title": "Loop Seconds",
+ "type": "number"
+ },
+ "after_seconds": {
+ "default": "PT15S",
+ "description": "\n The late runs service will mark runs as late after they have exceeded their scheduled start time by this many seconds. Defaults to `5` seconds.\n ",
+ "format": "duration",
+ "title": "After Seconds",
+ "type": "string"
+ }
+ },
+ "title": "ServerServicesLateRunsSettings",
+ "type": "object"
+ },
+ "ServerServicesPauseExpirationsSettings": {
+ "description": "Settings for controlling the pause expiration service",
+ "properties": {
+ "enabled": {
+ "default": true,
+ "description": "\n Whether or not to start the paused flow run expiration service in the server\n application. If disabled, paused flows that have timed out will remain in a Paused state\n until a resume attempt.\n ",
+ "title": "Enabled",
+ "type": "boolean"
+ },
+ "loop_seconds": {
+ "default": 5,
+ "description": "\n The pause expiration service will look for runs to mark as failed this often. Defaults to `5`.\n ",
+ "title": "Loop Seconds",
+ "type": "number"
+ }
+ },
+ "title": "ServerServicesPauseExpirationsSettings",
+ "type": "object"
+ },
+ "ServerServicesSchedulerSettings": {
+ "description": "Settings for controlling the scheduler service",
+ "properties": {
+ "enabled": {
+ "default": true,
+ "description": "Whether or not to start the scheduler service in the server application.",
+ "title": "Enabled",
+ "type": "boolean"
+ },
+ "loop_seconds": {
+ "default": 60,
+ "description": "\n The scheduler loop interval, in seconds. This determines\n how often the scheduler will attempt to schedule new flow runs, but has no\n impact on how quickly either flow runs or task runs are actually executed.\n Defaults to `60`.\n ",
+ "title": "Loop Seconds",
+ "type": "number"
+ },
+ "deployment_batch_size": {
+ "default": 100,
+ "description": "\n The number of deployments the scheduler will attempt to\n schedule in a single batch. If there are more deployments than the batch\n size, the scheduler immediately attempts to schedule the next batch; it\n does not sleep for `scheduler_loop_seconds` until it has visited every\n deployment once. Defaults to `100`.\n ",
+ "title": "Deployment Batch Size",
+ "type": "integer"
+ },
+ "max_runs": {
+ "default": 100,
+ "description": "\n The scheduler will attempt to schedule up to this many\n auto-scheduled runs in the future. Note that runs may have fewer than\n this many scheduled runs, depending on the value of\n `scheduler_max_scheduled_time`. Defaults to `100`.\n ",
+ "title": "Max Runs",
+ "type": "integer"
+ },
+ "min_runs": {
+ "default": 3,
+ "description": "\n The scheduler will attempt to schedule at least this many\n auto-scheduled runs in the future. Note that runs may have more than\n this many scheduled runs, depending on the value of\n `scheduler_min_scheduled_time`. Defaults to `3`.\n ",
+ "title": "Min Runs",
+ "type": "integer"
+ },
+ "max_scheduled_time": {
+ "default": "P100D",
+ "description": "\n The scheduler will create new runs up to this far in the\n future. Note that this setting will take precedence over\n `scheduler_max_runs`: if a flow runs once a month and\n `scheduler_max_scheduled_time` is three months, then only three runs will be\n scheduled. Defaults to 100 days (`8640000` seconds).\n ",
+ "format": "duration",
+ "title": "Max Scheduled Time",
+ "type": "string"
+ },
+ "min_scheduled_time": {
+ "default": "PT1H",
+ "description": "\n The scheduler will create new runs at least this far in the\n future. Note that this setting will take precedence over `scheduler_min_runs`:\n if a flow runs every hour and `scheduler_min_scheduled_time` is three hours,\n then three runs will be scheduled even if `scheduler_min_runs` is 1. Defaults to\n ",
+ "format": "duration",
+ "title": "Min Scheduled Time",
+ "type": "string"
+ },
+ "insert_batch_size": {
+ "default": 500,
+ "description": "\n The number of runs the scheduler will attempt to insert in a single batch.\n Defaults to `500`.\n ",
+ "title": "Insert Batch Size",
+ "type": "integer"
+ }
+ },
+ "title": "ServerServicesSchedulerSettings",
+ "type": "object"
+ },
+ "ServerServicesSettings": {
+ "description": "Settings for controlling server services",
+ "properties": {
+ "cancellation_cleanup": {
+ "$ref": "#/$defs/ServerServicesCancellationCleanupSettings"
+ },
+ "event_persister": {
+ "$ref": "#/$defs/ServerServicesEventPersisterSettings"
+ },
+ "flow_run_notifications": {
+ "$ref": "#/$defs/ServerServicesFlowRunNotificationsSettings"
+ },
+ "foreman": {
+ "$ref": "#/$defs/ServerServicesForemanSettings"
+ },
+ "late_runs": {
+ "$ref": "#/$defs/ServerServicesLateRunsSettings"
+ },
+ "scheduler": {
+ "$ref": "#/$defs/ServerServicesSchedulerSettings"
+ },
+ "pause_expirations": {
+ "$ref": "#/$defs/ServerServicesPauseExpirationsSettings"
+ },
+ "task_run_recorder": {
+ "$ref": "#/$defs/ServerServicesTaskRunRecorderSettings"
+ },
+ "triggers": {
+ "$ref": "#/$defs/ServerServicesTriggersSettings"
+ }
+ },
+ "title": "ServerServicesSettings",
+ "type": "object"
+ },
+ "ServerServicesTaskRunRecorderSettings": {
+ "description": "Settings for controlling the task run recorder service",
+ "properties": {
+ "enabled": {
+ "default": true,
+ "description": "Whether or not to start the task run recorder service in the server application.",
+ "title": "Enabled",
+ "type": "boolean"
+ }
+ },
+ "title": "ServerServicesTaskRunRecorderSettings",
+ "type": "object"
+ },
+ "ServerServicesTriggersSettings": {
+ "description": "Settings for controlling the triggers service",
+ "properties": {
+ "enabled": {
+ "default": true,
+ "description": "Whether or not to start the triggers service in the server application.",
+ "title": "Enabled",
+ "type": "boolean"
+ }
+ },
+ "title": "ServerServicesTriggersSettings",
+ "type": "object"
+ },
+ "ServerSettings": {
+ "description": "Settings for controlling server behavior",
+ "properties": {
+ "logging_level": {
+ "default": "WARNING",
+ "description": "The default logging level for the Prefect API server.",
+ "enum": [
+ "DEBUG",
+ "INFO",
+ "WARNING",
+ "ERROR",
+ "CRITICAL"
+ ],
+ "title": "Logging Level",
+ "type": "string"
+ },
+ "analytics_enabled": {
+ "default": true,
+ "description": "\n When enabled, Prefect sends anonymous data (e.g. count of flow runs, package version)\n on server startup to help us improve our product.\n ",
+ "title": "Analytics Enabled",
+ "type": "boolean"
+ },
+ "metrics_enabled": {
+ "default": false,
+ "description": "Whether or not to enable Prometheus metrics in the API.",
+ "title": "Metrics Enabled",
+ "type": "boolean"
+ },
+ "log_retryable_errors": {
+ "default": false,
+ "description": "If `True`, log retryable errors in the API and it's services.",
+ "title": "Log Retryable Errors",
+ "type": "boolean"
+ },
+ "register_blocks_on_start": {
+ "default": true,
+ "description": "If set, any block types that have been imported will be registered with the backend on application startup. If not set, block types must be manually registered.",
+ "title": "Register Blocks On Start",
+ "type": "boolean"
+ },
+ "memoize_block_auto_registration": {
+ "default": true,
+ "description": "Controls whether or not block auto-registration on start",
+ "title": "Memoize Block Auto Registration",
+ "type": "boolean"
+ },
+ "memo_store_path": {
+ "anyOf": [
+ {
+ "format": "path",
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The path to the memo store file.",
+ "title": "Memo Store Path"
+ },
+ "deployment_schedule_max_scheduled_runs": {
+ "default": 50,
+ "description": "The maximum number of scheduled runs to create for a deployment.",
+ "title": "Deployment Schedule Max Scheduled Runs",
+ "type": "integer"
+ },
+ "api": {
+ "$ref": "#/$defs/ServerAPISettings"
+ },
+ "database": {
+ "$ref": "#/$defs/ServerDatabaseSettings"
+ },
+ "deployments": {
+ "$ref": "#/$defs/ServerDeploymentsSettings",
+ "description": "Settings for controlling server deployments behavior"
+ },
+ "ephemeral": {
+ "$ref": "#/$defs/ServerEphemeralSettings"
+ },
+ "events": {
+ "$ref": "#/$defs/ServerEventsSettings",
+ "description": "Settings for controlling server events behavior"
+ },
+ "flow_run_graph": {
+ "$ref": "#/$defs/ServerFlowRunGraphSettings",
+ "description": "Settings for controlling flow run graph behavior"
+ },
+ "services": {
+ "$ref": "#/$defs/ServerServicesSettings",
+ "description": "Settings for controlling server services behavior"
+ },
+ "tasks": {
+ "$ref": "#/$defs/ServerTasksSettings",
+ "description": "Settings for controlling server tasks behavior"
+ },
+ "ui": {
+ "$ref": "#/$defs/ServerUISettings",
+ "description": "Settings for controlling server UI behavior"
+ }
+ },
+ "title": "ServerSettings",
+ "type": "object"
+ },
+ "ServerTasksSchedulingSettings": {
+ "description": "Settings for controlling server-side behavior related to task scheduling",
+ "properties": {
+ "max_scheduled_queue_size": {
+ "default": 1000,
+ "description": "The maximum number of scheduled tasks to queue for submission.",
+ "title": "Max Scheduled Queue Size",
+ "type": "integer"
+ },
+ "max_retry_queue_size": {
+ "default": 100,
+ "description": "The maximum number of retries to queue for submission.",
+ "title": "Max Retry Queue Size",
+ "type": "integer"
+ },
+ "pending_task_timeout": {
+ "default": "PT0S",
+ "description": "How long before a PENDING task are made available to another task worker.",
+ "format": "duration",
+ "title": "Pending Task Timeout",
+ "type": "string"
+ }
+ },
+ "title": "ServerTasksSchedulingSettings",
+ "type": "object"
+ },
+ "ServerTasksSettings": {
+ "description": "Settings for controlling server-side behavior related to tasks",
+ "properties": {
+ "tag_concurrency_slot_wait_seconds": {
+ "default": 30,
+ "description": "The number of seconds to wait before retrying when a task run cannot secure a concurrency slot from the server.",
+ "minimum": 0.0,
+ "title": "Tag Concurrency Slot Wait Seconds",
+ "type": "number"
+ },
+ "max_cache_key_length": {
+ "default": 2000,
+ "description": "The maximum number of characters allowed for a task run cache key.",
+ "title": "Max Cache Key Length",
+ "type": "integer"
+ },
+ "scheduling": {
+ "$ref": "#/$defs/ServerTasksSchedulingSettings"
+ }
+ },
+ "title": "ServerTasksSettings",
+ "type": "object"
+ },
+ "ServerUISettings": {
+ "properties": {
+ "enabled": {
+ "default": true,
+ "description": "Whether or not to serve the Prefect UI.",
+ "title": "Enabled",
+ "type": "boolean"
+ },
+ "api_url": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The connection url for communication from the UI to the API. Defaults to `PREFECT_API_URL` if set. Otherwise, the default URL is generated from `PREFECT_SERVER_API_HOST` and `PREFECT_SERVER_API_PORT`.",
+ "title": "Api Url"
+ },
+ "serve_base": {
+ "default": "/",
+ "description": "The base URL path to serve the Prefect UI from.",
+ "title": "Serve Base",
+ "type": "string"
+ },
+ "static_directory": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The directory to serve static files from. This should be used when running into permissions issues when attempting to serve the UI from the default directory (for example when running in a Docker container).",
+ "title": "Static Directory"
+ }
+ },
+ "title": "ServerUISettings",
+ "type": "object"
+ },
+ "TasksRunnerSettings": {
+ "properties": {
+ "thread_pool_max_workers": {
+ "anyOf": [
+ {
+ "exclusiveMinimum": 0,
+ "type": "integer"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The maximum number of workers for ThreadPoolTaskRunner.",
+ "title": "Thread Pool Max Workers"
+ }
+ },
+ "title": "TasksRunnerSettings",
+ "type": "object"
+ },
+ "TasksSchedulingSettings": {
+ "properties": {
+ "default_storage_block": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The `block-type/block-document` slug of a block to use as the default storage for autonomous tasks.",
+ "title": "Default Storage Block"
+ },
+ "delete_failed_submissions": {
+ "default": true,
+ "description": "Whether or not to delete failed task submissions from the database.",
+ "title": "Delete Failed Submissions",
+ "type": "boolean"
+ }
+ },
+ "title": "TasksSchedulingSettings",
+ "type": "object"
+ },
+ "TasksSettings": {
+ "properties": {
+ "refresh_cache": {
+ "default": false,
+ "description": "If `True`, enables a refresh of cached results: re-executing the task will refresh the cached results.",
+ "title": "Refresh Cache",
+ "type": "boolean"
+ },
+ "default_retries": {
+ "default": 0,
+ "description": "This value sets the default number of retries for all tasks.",
+ "minimum": 0,
+ "title": "Default Retries",
+ "type": "integer"
+ },
+ "default_retry_delay_seconds": {
+ "anyOf": [
+ {
+ "type": "integer"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "items": {
+ "type": "number"
+ },
+ "type": "array"
+ }
+ ],
+ "default": 0,
+ "description": "This value sets the default retry delay seconds for all tasks.",
+ "title": "Default Retry Delay Seconds"
+ },
+ "runner": {
+ "$ref": "#/$defs/TasksRunnerSettings",
+ "description": "Settings for controlling task runner behavior"
+ },
+ "scheduling": {
+ "$ref": "#/$defs/TasksSchedulingSettings",
+ "description": "Settings for controlling client-side task scheduling behavior"
+ }
+ },
+ "title": "TasksSettings",
+ "type": "object"
+ },
+ "TestingSettings": {
+ "properties": {
+ "test_mode": {
+ "default": false,
+ "description": "If `True`, places the API in test mode. This may modify behavior to facilitate testing.",
+ "title": "Test Mode",
+ "type": "boolean"
+ },
+ "unit_test_mode": {
+ "default": false,
+ "description": "This setting only exists to facilitate unit testing. If `True`, code is executing in a unit test context. Defaults to `False`.",
+ "title": "Unit Test Mode",
+ "type": "boolean"
+ },
+ "unit_test_loop_debug": {
+ "default": true,
+ "description": "If `True` turns on debug mode for the unit testing event loop.",
+ "title": "Unit Test Loop Debug",
+ "type": "boolean"
+ },
+ "test_setting": {
+ "anyOf": [
+ {},
+ {
+ "type": "null"
+ }
+ ],
+ "default": "FOO",
+ "description": "This setting only exists to facilitate unit testing. If in test mode, this setting will return its value. Otherwise, it returns `None`.",
+ "title": "Test Setting"
+ }
+ },
+ "title": "TestingSettings",
+ "type": "object"
+ },
+ "WorkerSettings": {
+ "properties": {
+ "heartbeat_seconds": {
+ "default": 30,
+ "description": "Number of seconds a worker should wait between sending a heartbeat.",
+ "title": "Heartbeat Seconds",
+ "type": "number"
+ },
+ "query_seconds": {
+ "default": 10,
+ "description": "Number of seconds a worker should wait between queries for scheduled work.",
+ "title": "Query Seconds",
+ "type": "number"
+ },
+ "prefetch_seconds": {
+ "default": 10,
+ "description": "The number of seconds into the future a worker should query for scheduled work.",
+ "title": "Prefetch Seconds",
+ "type": "number"
+ },
+ "webserver": {
+ "$ref": "#/$defs/WorkerWebserverSettings",
+ "description": "Settings for a worker's webserver"
+ }
+ },
+ "title": "WorkerSettings",
+ "type": "object"
+ },
+ "WorkerWebserverSettings": {
+ "properties": {
+ "host": {
+ "default": "0.0.0.0",
+ "description": "The host address the worker's webserver should bind to.",
+ "title": "Host",
+ "type": "string"
+ },
+ "port": {
+ "default": 8080,
+ "description": "The port the worker's webserver should bind to.",
+ "title": "Port",
+ "type": "integer"
+ }
+ },
+ "title": "WorkerWebserverSettings",
+ "type": "object"
+ }
+ },
+ "description": "Settings for Prefect using Pydantic settings.\n\nSee https://docs.pydantic.dev/latest/concepts/pydantic_settings",
+ "properties": {
+ "home": {
+ "default": "~/.prefect",
+ "description": "The path to the Prefect home directory. Defaults to ~/.prefect",
+ "format": "path",
+ "title": "Home",
+ "type": "string"
+ },
+ "profiles_path": {
+ "anyOf": [
+ {
+ "format": "path",
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The path to a profiles configuration file.",
+ "title": "Profiles Path"
+ },
+ "debug_mode": {
+ "default": false,
+ "description": "If True, enables debug mode which may provide additional logging and debugging features.",
+ "title": "Debug Mode",
+ "type": "boolean"
+ },
+ "api": {
+ "$ref": "#/$defs/APISettings"
+ },
+ "cli": {
+ "$ref": "#/$defs/CLISettings"
+ },
+ "client": {
+ "$ref": "#/$defs/ClientSettings",
+ "description": "Settings for for controlling API client behavior"
+ },
+ "cloud": {
+ "$ref": "#/$defs/CloudSettings"
+ },
+ "deployments": {
+ "$ref": "#/$defs/DeploymentsSettings"
+ },
+ "flows": {
+ "$ref": "#/$defs/FlowsSettings"
+ },
+ "internal": {
+ "$ref": "#/$defs/InternalSettings",
+ "description": "Settings for internal Prefect machinery"
+ },
+ "logging": {
+ "$ref": "#/$defs/LoggingSettings"
+ },
+ "results": {
+ "$ref": "#/$defs/ResultsSettings"
+ },
+ "runner": {
+ "$ref": "#/$defs/RunnerSettings"
+ },
+ "server": {
+ "$ref": "#/$defs/ServerSettings"
+ },
+ "tasks": {
+ "$ref": "#/$defs/TasksSettings",
+ "description": "Settings for controlling task behavior"
+ },
+ "testing": {
+ "$ref": "#/$defs/TestingSettings",
+ "description": "Settings used during testing"
+ },
+ "worker": {
+ "$ref": "#/$defs/WorkerSettings",
+ "description": "Settings for controlling worker behavior"
+ },
+ "ui_url": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The URL of the Prefect UI. If not set, the client will attempt to infer it.",
+ "title": "Ui Url"
+ },
+ "silence_api_url_misconfiguration": {
+ "default": false,
+ "description": "\n If `True`, disable the warning when a user accidentally misconfigure its `PREFECT_API_URL`\n Sometimes when a user manually set `PREFECT_API_URL` to a custom url,reverse-proxy for example,\n we would like to silence this warning so we will set it to `FALSE`.\n ",
+ "title": "Silence Api Url Misconfiguration",
+ "type": "boolean"
+ },
+ "experimental_warn": {
+ "default": true,
+ "description": "If `True`, warn on usage of experimental features.",
+ "title": "Experimental Warn",
+ "type": "boolean"
+ },
+ "async_fetch_state_result": {
+ "default": false,
+ "description": "\n Determines whether `State.result()` fetches results automatically or not.\n In Prefect 2.6.0, the `State.result()` method was updated to be async\n to facilitate automatic retrieval of results from storage which means when\n writing async code you must `await` the call. For backwards compatibility,\n the result is not retrieved by default for async users. You may opt into this\n per call by passing `fetch=True` or toggle this setting to change the behavior\n globally.\n ",
+ "title": "Async Fetch State Result",
+ "type": "boolean"
+ },
+ "experimental_enable_schedule_concurrency": {
+ "default": false,
+ "description": "Whether or not to enable concurrency for scheduled tasks.",
+ "title": "Experimental Enable Schedule Concurrency",
+ "type": "boolean"
+ }
+ },
+ "title": "Settings",
+ "type": "object"
+}
\ No newline at end of file
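For orientation: the settings in this schema are nested pydantic-settings models. A minimal sketch of how they surface at runtime, assuming Prefect 3.x conventions (the `PREFECT_TASKS_DEFAULT_RETRIES` env var name and the `get_current_settings` accessor are inferred, not part of this diff):

```python
import os

# Assumption: nested settings map to PREFECT_-prefixed environment variables,
# e.g. tasks.default_retries -> PREFECT_TASKS_DEFAULT_RETRIES.
os.environ["PREFECT_TASKS_DEFAULT_RETRIES"] = "3"

from prefect.settings import get_current_settings

settings = get_current_settings()
print(settings.tasks.default_retries)  # expected: 3
print(settings.home)                   # default: ~/.prefect
```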
diff --git a/setup.py b/setup.py
index 889cdd6f806a..2b560ab5499f 100644
--- a/setup.py
+++ b/setup.py
@@ -14,6 +14,7 @@ def read_requirements(file):
client_requires = read_requirements("requirements-client.txt")
install_requires = read_requirements("requirements.txt")[1:] + client_requires
dev_requires = read_requirements("requirements-dev.txt")
+otel_requires = read_requirements("requirements-otel.txt")
setup(
# Package metadata
@@ -49,29 +50,30 @@ def read_requirements(file):
install_requires=install_requires,
extras_require={
"dev": dev_requires,
+ "otel": otel_requires,
# Infrastructure extras
- "aws": "prefect-aws>=0.5.0rc1",
- "azure": "prefect-azure>=0.4.0rc1",
- "gcp": "prefect-gcp>=0.6.0rc1",
- "docker": "prefect-docker>=0.6.0rc1",
- "kubernetes": "prefect-kubernetes>=0.4.0rc1",
- "shell": "prefect-shell>=0.3.0rc1",
+ "aws": "prefect-aws>=0.5.0",
+ "azure": "prefect-azure>=0.4.0",
+ "gcp": "prefect-gcp>=0.6.0",
+ "docker": "prefect-docker>=0.6.0",
+ "kubernetes": "prefect-kubernetes>=0.4.0",
+ "shell": "prefect-shell>=0.3.0",
# Distributed task execution extras
- "dask": "prefect-dask>=0.3.0rc1",
- "ray": "prefect-ray>=0.4.0rc1",
+ "dask": "prefect-dask>=0.3.0",
+ "ray": "prefect-ray>=0.4.0",
# Version control extras
- "bitbucket": "prefect-bitbucket>=0.3.0rc1",
- "github": "prefect-github>=0.3.0rc1",
- "gitlab": "prefect-gitlab>=0.3.0rc1",
+ "bitbucket": "prefect-bitbucket>=0.3.0",
+ "github": "prefect-github>=0.3.0",
+ "gitlab": "prefect-gitlab>=0.3.0",
# Database extras
- "databricks": "prefect-databricks>=0.3.0rc1",
- "dbt": "prefect-dbt>=0.6.0rc1",
- "snowflake": "prefect-snowflake>=0.28.0rc1",
- "sqlalchemy": "prefect-sqlalchemy>=0.5.0rc1",
+ "databricks": "prefect-databricks>=0.3.0",
+ "dbt": "prefect-dbt>=0.6.0",
+ "snowflake": "prefect-snowflake>=0.28.0",
+ "sqlalchemy": "prefect-sqlalchemy>=0.5.0",
"redis": "prefect-redis>=0.2.0",
# Monitoring extras
- "email": "prefect-email>=0.4.0rc1",
- "slack": "prefect-slack>=0.3.0rc1",
+ "email": "prefect-email>=0.4.0",
+ "slack": "prefect-slack>=0.3.0",
},
classifiers=[
"Natural Language :: English",
diff --git a/src/integrations/prefect-aws/README.md b/src/integrations/prefect-aws/README.md
index 5526f03c5dbe..fbd323352343 100644
--- a/src/integrations/prefect-aws/README.md
+++ b/src/integrations/prefect-aws/README.md
@@ -7,7 +7,7 @@
-## Welcome!
+## Welcome
`prefect-aws` makes it easy to leverage the capabilities of AWS in your flows, featuring support for ECS, S3, Secrets Manager, and Batch.
@@ -18,7 +18,3 @@ To start using `prefect-aws`:
```bash
pip install prefect-aws
```
-
-### Contributing
-
-Thanks for thinking about chipping in! Check out this [step-by-step guide](https://prefecthq.github.io/prefect-aws/#installation) on how to get started.
diff --git a/src/integrations/prefect-aws/prefect_aws/batch.py b/src/integrations/prefect-aws/prefect_aws/batch.py
index c9ef8dca2a9c..17ac00146cae 100644
--- a/src/integrations/prefect-aws/prefect_aws/batch.py
+++ b/src/integrations/prefect-aws/prefect_aws/batch.py
@@ -3,14 +3,14 @@
from typing import Any, Dict, Optional
from prefect import task
+from prefect._internal.compatibility.async_dispatch import async_dispatch
from prefect.logging import get_run_logger
-from prefect.utilities.asyncutils import run_sync_in_worker_thread, sync_compatible
+from prefect.utilities.asyncutils import run_sync_in_worker_thread
from prefect_aws.credentials import AwsCredentials
@task
-@sync_compatible
-async def batch_submit(
+async def abatch_submit(
job_name: str,
job_queue: str,
job_definition: str,
@@ -18,7 +18,7 @@ async def batch_submit(
**batch_kwargs: Optional[Dict[str, Any]],
) -> str:
"""
- Submit a job to the AWS Batch job service.
+ Asynchronously submit a job to the AWS Batch job service.
Args:
job_name: The AWS batch job name.
@@ -73,3 +73,69 @@ def example_batch_submit_flow():
**batch_kwargs,
)
return response["jobId"]
+
+
+@task
+@async_dispatch(abatch_submit)
+def batch_submit(
+ job_name: str,
+ job_queue: str,
+ job_definition: str,
+ aws_credentials: AwsCredentials,
+ **batch_kwargs: Optional[Dict[str, Any]],
+) -> str:
+ """
+ Submit a job to the AWS Batch job service.
+
+ Args:
+ job_name: The AWS batch job name.
+ job_queue: Name of the AWS batch job queue.
+ job_definition: The AWS batch job definition.
+ aws_credentials: Credentials to use for authentication with AWS.
+ **batch_kwargs: Additional keyword arguments to pass to the boto3
+ `submit_job` function. See the documentation for
+ [submit_job](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/batch.html#Batch.Client.submit_job)
+ for more details.
+
+ Returns:
+ The id corresponding to the job.
+
+ Example:
+ Submits a job to batch.
+
+ ```python
+ from prefect import flow
+ from prefect_aws import AwsCredentials
+ from prefect_aws.batch import batch_submit
+
+
+ @flow
+ def example_batch_submit_flow():
+ aws_credentials = AwsCredentials(
+ aws_access_key_id="acccess_key_id",
+ aws_secret_access_key="secret_access_key"
+ )
+ job_id = batch_submit(
+ "job_name",
+ "job_queue",
+ "job_definition",
+ aws_credentials
+ )
+ return job_id
+
+ example_batch_submit_flow()
+ ```
+
+ """ # noqa
+ logger = get_run_logger()
+ logger.info("Preparing to submit %s job to %s job queue", job_name, job_queue)
+
+ batch_client = aws_credentials.get_boto3_session().client("batch")
+
+ response = batch_client.submit_job(
+ jobName=job_name,
+ jobQueue=job_queue,
+ jobDefinition=job_definition,
+ **batch_kwargs,
+ )
+ return response["jobId"]
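How the new dispatch behaves for callers, as a hedged sketch (job, queue, and definition names are placeholders; assumes ambient AWS credentials):

```python
from prefect import flow
from prefect_aws import AwsCredentials
from prefect_aws.batch import abatch_submit, batch_submit


@flow
async def async_caller() -> str:
    creds = AwsCredentials()  # placeholder; resolves ambient AWS credentials
    # In an async context, batch_submit dispatches to abatch_submit:
    job_id = await batch_submit("my-job", "my-queue", "my-definition", creds)
    # The async variant can also be called explicitly:
    job_id = await abatch_submit("my-job", "my-queue", "my-definition", creds)
    # Or force the sync implementation from async code, as the tests below do:
    job_id = batch_submit("my-job", "my-queue", "my-definition", creds, _sync=True)
    return job_id
```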
diff --git a/src/integrations/prefect-aws/prefect_aws/credentials.py b/src/integrations/prefect-aws/prefect_aws/credentials.py
index 987a4c722660..7a8a4add88b3 100644
--- a/src/integrations/prefect-aws/prefect_aws/credentials.py
+++ b/src/integrations/prefect-aws/prefect_aws/credentials.py
@@ -70,7 +70,7 @@ class AwsCredentials(CredentialsBlock):
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/d74b16fe84ce626345adf235a47008fea2869a60-225x225.png" # noqa
_block_type_name = "AWS Credentials"
- _documentation_url = "https://prefecthq.github.io/prefect-aws/credentials/#prefect_aws.credentials.AwsCredentials" # noqa
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-aws" # noqa
aws_access_key_id: Optional[str] = Field(
default=None,
@@ -209,7 +209,7 @@ class MinIOCredentials(CredentialsBlock):
"docs: https://docs.min.io/docs/minio-server-configuration-guide.html "
"for more info about the possible credential configurations."
)
- _documentation_url = "https://prefecthq.github.io/prefect-aws/credentials/#prefect_aws.credentials.MinIOCredentials" # noqa
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-aws" # noqa
minio_root_user: str = Field(default=..., description="Admin or root user.")
minio_root_password: SecretStr = Field(
diff --git a/src/integrations/prefect-aws/prefect_aws/lambda_function.py b/src/integrations/prefect-aws/prefect_aws/lambda_function.py
index 8cbd14de1bca..6b778c48f407 100644
--- a/src/integrations/prefect-aws/prefect_aws/lambda_function.py
+++ b/src/integrations/prefect-aws/prefect_aws/lambda_function.py
@@ -79,7 +79,7 @@ class LambdaFunction(Block):
_block_type_name = "Lambda Function"
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/d74b16fe84ce626345adf235a47008fea2869a60-225x225.png" # noqa
- _documentation_url = "https://prefecthq.github.io/prefect-aws/s3/#prefect_aws.lambda_function.LambdaFunction" # noqa
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-aws" # noqa
function_name: str = Field(
title="Function Name",
diff --git a/src/integrations/prefect-aws/prefect_aws/s3.py b/src/integrations/prefect-aws/prefect_aws/s3.py
index 84570728c3d2..39a3e9b7ea9c 100644
--- a/src/integrations/prefect-aws/prefect_aws/s3.py
+++ b/src/integrations/prefect-aws/prefect_aws/s3.py
@@ -403,9 +403,7 @@ class S3Bucket(WritableFileSystem, WritableDeploymentStorage, ObjectStorageBlock
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/d74b16fe84ce626345adf235a47008fea2869a60-225x225.png" # noqa
_block_type_name = "S3 Bucket"
- _documentation_url = (
- "https://prefecthq.github.io/prefect-aws/s3/#prefect_aws.s3.S3Bucket" # noqa
- )
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-aws" # noqa
bucket_name: str = Field(default=..., description="Name of your bucket.")
diff --git a/src/integrations/prefect-aws/prefect_aws/secrets_manager.py b/src/integrations/prefect-aws/prefect_aws/secrets_manager.py
index 0536f8175b9e..82a695d02141 100644
--- a/src/integrations/prefect-aws/prefect_aws/secrets_manager.py
+++ b/src/integrations/prefect-aws/prefect_aws/secrets_manager.py
@@ -367,7 +367,7 @@ class AwsSecret(SecretBlock):
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/d74b16fe84ce626345adf235a47008fea2869a60-225x225.png" # noqa
_block_type_name = "AWS Secret"
- _documentation_url = "https://prefecthq.github.io/prefect-aws/secrets_manager/#prefect_aws.secrets_manager.AwsSecret" # noqa
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-aws" # noqa
aws_credentials: AwsCredentials
secret_name: str = Field(default=..., description="The name of the secret.")
diff --git a/src/integrations/prefect-aws/prefect_aws/workers/ecs_worker.py b/src/integrations/prefect-aws/prefect_aws/workers/ecs_worker.py
index 4d6da6127392..54cf62958350 100644
--- a/src/integrations/prefect-aws/prefect_aws/workers/ecs_worker.py
+++ b/src/integrations/prefect-aws/prefect_aws/workers/ecs_worker.py
@@ -654,7 +654,7 @@ class ECSWorker(BaseWorker):
"and Fargate clusters. Requires an AWS account."
)
_display_name = "AWS Elastic Container Service"
- _documentation_url = "https://prefecthq.github.io/prefect-aws/ecs_worker/"
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-aws/"
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/d74b16fe84ce626345adf235a47008fea2869a60-225x225.png" # noqa
async def run(
diff --git a/src/integrations/prefect-aws/pyproject.toml b/src/integrations/prefect-aws/pyproject.toml
index 8f6be1fb19fb..bf24bb96be85 100644
--- a/src/integrations/prefect-aws/pyproject.toml
+++ b/src/integrations/prefect-aws/pyproject.toml
@@ -27,7 +27,7 @@ dependencies = [
"botocore>=1.27.53",
"mypy_boto3_s3>=1.24.94",
"mypy_boto3_secretsmanager>=1.26.49",
- "prefect>=3.0.0rc1",
+ "prefect>=3.1.3",
"pyparsing>=3.1.1",
"tenacity>=8.0.0",
]
@@ -50,7 +50,7 @@ dev = [
"mypy",
"pillow",
"pre-commit",
- "pytest",
+ "pytest >= 8.3",
"pytest-asyncio >= 0.18.2, != 0.22.0, < 0.23.0", # Cannot override event loop in 0.23.0. See https://github.com/pytest-dev/pytest-asyncio/issues/706 for more details.
"pytest-cov",
"pytest-env",
@@ -86,7 +86,6 @@ fail_under = 80
show_missing = true
[tool.pytest.ini_options]
+asyncio_default_fixture_loop_scope = "session"
asyncio_mode = "auto"
-env = [
- "PREFECT_TEST_MODE=1",
-]
+env = ["PREFECT_TEST_MODE=1"]
diff --git a/src/integrations/prefect-aws/tests/test_batch.py b/src/integrations/prefect-aws/tests/test_batch.py
index 404aa38dee57..8f8acb47dfbe 100644
--- a/src/integrations/prefect-aws/tests/test_batch.py
+++ b/src/integrations/prefect-aws/tests/test_batch.py
@@ -1,13 +1,23 @@
+from typing import Optional
from uuid import UUID
import boto3
import pytest
from moto import mock_batch, mock_iam
-from prefect_aws.batch import batch_submit
+from prefect_aws.batch import abatch_submit, batch_submit
from prefect import flow
+def assert_valid_job_id(job_id: Optional[str]):
+ assert job_id is not None, "job_id is None"
+ try:
+ UUID(str(job_id))
+ except ValueError:
+ pytest.fail(f"{job_id} is not a valid UUID")
+
+
@pytest.fixture(scope="function")
def batch_client(aws_credentials):
with mock_batch():
@@ -62,20 +72,57 @@ def job_definition_arn(batch_client):
return job_definition_arn
-def test_batch_submit(job_queue_arn, job_definition_arn, aws_credentials):
- @flow
- def test_flow():
- return batch_submit(
+class TestBatchSubmit:
+ def test_batch_submit(self, job_queue_arn, job_definition_arn, aws_credentials):
+ @flow
+ def test_flow():
+ return batch_submit(
+ "batch_test_job",
+ job_queue_arn,
+ job_definition_arn,
+ aws_credentials,
+ )
+
+ job_id = test_flow()
+
+ assert_valid_job_id(job_id)
+
+ async def test_batch_submit_async_dispatch(
+ self, job_queue_arn, job_definition_arn, aws_credentials
+ ):
+ @flow
+ async def test_flow():
+ return await batch_submit(
+ "batch_test_job",
+ job_queue_arn,
+ job_definition_arn,
+ aws_credentials,
+ )
+
+ job_id = await test_flow()
+ assert_valid_job_id(job_id)
+
+ async def test_batch_submit_force_sync_from_async(
+ self, job_queue_arn, job_definition_arn, aws_credentials
+ ):
+ job_id = batch_submit(
"batch_test_job",
job_queue_arn,
job_definition_arn,
aws_credentials,
+ _sync=True,
)
+ assert_valid_job_id(job_id)
- job_id = test_flow()
- try:
- UUID(str(job_id))
- assert True, f"{job_id} is a valid UUID"
- except ValueError:
- assert False, f"{job_id} is not a valid UUID"
+class TestBatchSubmitAsync:
+ async def test_batch_submit_explicit_async(
+ self, job_queue_arn, job_definition_arn, aws_credentials
+ ):
+ job_id = await abatch_submit(
+ "batch_test_job",
+ job_queue_arn,
+ job_definition_arn,
+ aws_credentials,
+ )
+ assert_valid_job_id(job_id)
diff --git a/src/integrations/prefect-azure/prefect_azure/blob_storage.py b/src/integrations/prefect-azure/prefect_azure/blob_storage.py
index 7a6edb5af999..6f3c348762d1 100644
--- a/src/integrations/prefect-azure/prefect_azure/blob_storage.py
+++ b/src/integrations/prefect-azure/prefect_azure/blob_storage.py
@@ -63,8 +63,9 @@ def example_blob_storage_download_flow():
logger = get_run_logger()
logger.info("Downloading blob from container %s with key %s", container, blob)
- async with blob_storage_credentials.get_blob_client(container, blob) as blob_client:
- blob_obj = await blob_client.download_blob()
+ async with blob_storage_credentials as credentials:
+ async with credentials.get_blob_client(container, blob) as blob_client:
+ blob_obj = await blob_client.download_blob()
output = await blob_obj.content_as_bytes()
return output
@@ -123,8 +124,9 @@ def example_blob_storage_upload_flow():
if blob is None:
blob = str(uuid.uuid4())
- async with blob_storage_credentials.get_blob_client(container, blob) as blob_client:
- await blob_client.upload_blob(data, overwrite=overwrite)
+ async with blob_storage_credentials as credentials:
+ async with credentials.get_blob_client(container, blob) as blob_client:
+ await blob_client.upload_blob(data, overwrite=overwrite)
return blob
@@ -176,17 +178,16 @@ def example_blob_storage_list_flow():
logger = get_run_logger()
logger.info("Listing blobs from container %s", container)
- async with blob_storage_credentials.get_container_client(
- container
- ) as container_client:
- blobs = [
- blob
- async for blob in container_client.list_blobs(
- name_starts_with=name_starts_with, include=include, **kwargs
- )
- ]
+ async with blob_storage_credentials as credentials:
+ async with credentials.get_container_client(container) as container_client:
+ blobs = [
+ blob
+ async for blob in container_client.list_blobs(
+ name_starts_with=name_starts_with, include=include, **kwargs
+ )
+ ]
- return blobs
+ return blobs
class AzureBlobStorageContainer(
@@ -207,7 +208,7 @@ class AzureBlobStorageContainer(
_block_type_name = "Azure Blob Storage Container"
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/54e3fa7e00197a4fbd1d82ed62494cb58d08c96a-250x250.png" # noqa
- _documentation_url = "https://prefecthq.github.io/prefect-azure/blob_storage/#prefect_azure.blob_storabe.AzureBlobStorageContainer" # noqa
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-azure" # noqa
container_name: str = Field(
default=..., description="The name of a Azure Blob Storage container."
@@ -278,32 +279,35 @@ async def download_folder_to_path(
to_folder,
)
full_container_path = self._get_path_relative_to_base_folder(from_folder)
- async with self.credentials.get_container_client(
- self.container_name
- ) as container_client:
- try:
- async for blob in container_client.list_blobs(
- name_starts_with=full_container_path
- ):
- blob_path = blob.name
- local_path = Path(to_folder) / Path(blob_path).relative_to(
- full_container_path
- )
- local_path.parent.mkdir(parents=True, exist_ok=True)
- async with container_client.get_blob_client(
- blob_path
- ) as blob_client:
- blob_obj = await blob_client.download_blob(**download_kwargs)
-
- with local_path.open(mode="wb") as to_file:
- await blob_obj.readinto(to_file)
- except ResourceNotFoundError as exc:
- raise RuntimeError(
- "An error occurred when attempting to download from container"
- f" {self.container_name}: {exc.reason}"
- ) from exc
+ async with self.credentials as credentials:
+ async with credentials.get_container_client(
+ self.container_name
+ ) as container_client:
+ try:
+ async for blob in container_client.list_blobs(
+ name_starts_with=full_container_path
+ ):
+ blob_path = blob.name
+ local_path = Path(to_folder) / Path(blob_path).relative_to(
+ full_container_path
+ )
+ local_path.parent.mkdir(parents=True, exist_ok=True)
+ async with container_client.get_blob_client(
+ blob_path
+ ) as blob_client:
+ blob_obj = await blob_client.download_blob(
+ **download_kwargs
+ )
+
+ with local_path.open(mode="wb") as to_file:
+ await blob_obj.readinto(to_file)
+ except ResourceNotFoundError as exc:
+ raise RuntimeError(
+ "An error occurred when attempting to download from container"
+ f" {self.container_name}: {exc.reason}"
+ ) from exc
- return Path(to_folder)
+ return Path(to_folder)
@sync_compatible
async def download_object_to_file_object(
@@ -349,19 +353,20 @@ async def download_object_to_file_object(
"Downloading object from container %s to file object", self.container_name
)
full_container_path = self._get_path_relative_to_base_folder(from_path)
- async with self.credentials.get_blob_client(
- self.container_name, full_container_path
- ) as blob_client:
- try:
- blob_obj = await blob_client.download_blob(**download_kwargs)
- await blob_obj.download_to_stream(to_file_object)
- except ResourceNotFoundError as exc:
- raise RuntimeError(
- "An error occurred when attempting to download from container"
- f" {self.container_name}: {exc.reason}"
- ) from exc
-
- return to_file_object
+ async with self.credentials as credentials:
+ async with credentials.get_blob_client(
+ self.container_name, full_container_path
+ ) as blob_client:
+ try:
+ blob_obj = await blob_client.download_blob(**download_kwargs)
+ await blob_obj.download_to_stream(to_file_object)
+ except ResourceNotFoundError as exc:
+ raise RuntimeError(
+ "An error occurred when attempting to download from container"
+ f" {self.container_name}: {exc.reason}"
+ ) from exc
+
+ return to_file_object
@sync_compatible
async def download_object_to_path(
@@ -409,24 +414,25 @@ async def download_object_to_path(
to_path,
)
full_container_path = self._get_path_relative_to_base_folder(from_path)
- async with self.credentials.get_blob_client(
- self.container_name, full_container_path
- ) as blob_client:
- try:
- blob_obj = await blob_client.download_blob(**download_kwargs)
+ async with self.credentials as credentials:
+ async with credentials.get_blob_client(
+ self.container_name, full_container_path
+ ) as blob_client:
+ try:
+ blob_obj = await blob_client.download_blob(**download_kwargs)
- path = Path(to_path)
+ path = Path(to_path)
- path.parent.mkdir(parents=True, exist_ok=True)
+ path.parent.mkdir(parents=True, exist_ok=True)
- with path.open(mode="wb") as to_file:
- await blob_obj.readinto(to_file)
- except ResourceNotFoundError as exc:
- raise RuntimeError(
- "An error occurred when attempting to download from container"
- f" {self.container_name}: {exc.reason}"
- ) from exc
- return Path(to_path)
+ with path.open(mode="wb") as to_file:
+ await blob_obj.readinto(to_file)
+ except ResourceNotFoundError as exc:
+ raise RuntimeError(
+ "An error occurred when attempting to download from container"
+ f" {self.container_name}: {exc.reason}"
+ ) from exc
+ return path
@sync_compatible
async def upload_from_file_object(
@@ -471,18 +477,19 @@ async def upload_from_file_object(
"Uploading object to container %s with key %s", self.container_name, to_path
)
full_container_path = self._get_path_relative_to_base_folder(to_path)
- async with self.credentials.get_blob_client(
- self.container_name, full_container_path
- ) as blob_client:
- try:
- await blob_client.upload_blob(from_file_object, **upload_kwargs)
- except ResourceNotFoundError as exc:
- raise RuntimeError(
- "An error occurred when attempting to upload from container"
- f" {self.container_name}: {exc.reason}"
- ) from exc
-
- return to_path
+ async with self.credentials as credentials:
+ async with credentials.get_blob_client(
+ self.container_name, full_container_path
+ ) as blob_client:
+ try:
+ await blob_client.upload_blob(from_file_object, **upload_kwargs)
+ except ResourceNotFoundError as exc:
+ raise RuntimeError(
+ "An error occurred when attempting to upload from container"
+ f" {self.container_name}: {exc.reason}"
+ ) from exc
+
+ return to_path
@sync_compatible
async def upload_from_path(
@@ -526,19 +533,20 @@ async def upload_from_path(
"Uploading object to container %s with key %s", self.container_name, to_path
)
full_container_path = self._get_path_relative_to_base_folder(to_path)
- async with self.credentials.get_blob_client(
- self.container_name, full_container_path
- ) as blob_client:
- try:
- with open(from_path, "rb") as f:
- await blob_client.upload_blob(f, **upload_kwargs)
- except ResourceNotFoundError as exc:
- raise RuntimeError(
- "An error occurred when attempting to upload to container"
- f" {self.container_name}: {exc.reason}"
- ) from exc
-
- return to_path
+ async with self.credentials as credentials:
+ async with credentials.get_blob_client(
+ self.container_name, full_container_path
+ ) as blob_client:
+ try:
+ with open(from_path, "rb") as f:
+ await blob_client.upload_blob(f, **upload_kwargs)
+ except ResourceNotFoundError as exc:
+ raise RuntimeError(
+ "An error occurred when attempting to upload to container"
+ f" {self.container_name}: {exc.reason}"
+ ) from exc
+
+ return to_path
@sync_compatible
async def upload_from_folder(
@@ -587,29 +595,30 @@ async def upload_from_folder(
to_folder,
)
full_container_path = self._get_path_relative_to_base_folder(to_folder)
- async with self.credentials.get_container_client(
- self.container_name
- ) as container_client:
- if not Path(from_folder).is_dir():
- raise ValueError(f"{from_folder} is not a directory")
- for path in Path(from_folder).rglob("*"):
- if path.is_file():
- blob_path = Path(full_container_path) / path.relative_to(
- from_folder
- )
- async with container_client.get_blob_client(
- blob_path.as_posix()
- ) as blob_client:
- try:
- await blob_client.upload_blob(
- path.read_bytes(), **upload_kwargs
- )
- except ResourceNotFoundError as exc:
- raise RuntimeError(
- "An error occurred when attempting to upload to "
- f"container {self.container_name}: {exc.reason}"
- ) from exc
- return full_container_path
+ async with self.credentials as credentials:
+ async with credentials.get_container_client(
+ self.container_name
+ ) as container_client:
+ if not Path(from_folder).is_dir():
+ raise ValueError(f"{from_folder} is not a directory")
+ for path in Path(from_folder).rglob("*"):
+ if path.is_file():
+ blob_path = Path(full_container_path) / path.relative_to(
+ from_folder
+ )
+ async with container_client.get_blob_client(
+ blob_path.as_posix()
+ ) as blob_client:
+ try:
+ await blob_client.upload_blob(
+ path.read_bytes(), **upload_kwargs
+ )
+ except ResourceNotFoundError as exc:
+ raise RuntimeError(
+ "An error occurred when attempting to upload to "
+ f"container {self.container_name}: {exc.reason}"
+ ) from exc
+ return full_container_path
@sync_compatible
async def get_directory(
@@ -736,11 +745,12 @@ async def list_blobs(self) -> List[str]:
self.container_name,
)
- async with self.credentials.get_container_client(
- self.container_name
- ) as container_client:
- blobs = container_client.list_blobs()
- filenames = []
- async for blob in blobs:
- filenames.append(blob.name)
- return filenames
+ async with self.credentials as credentials:
+ async with credentials.get_container_client(
+ self.container_name
+ ) as container_client:
+ blobs = container_client.list_blobs()
+ filenames = []
+ async for blob in blobs:
+ filenames.append(blob.name)
+ return filenames
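Caller-facing usage is unchanged; the tasks now simply open clients inside the credentials' own async context so the cached credential is cleaned up. A sketch with placeholder names:

```python
from prefect import flow
from prefect_azure import AzureBlobStorageCredentials
from prefect_azure.blob_storage import blob_storage_download


@flow
async def fetch_report() -> bytes:
    creds = AzureBlobStorageCredentials(
        connection_string="<connection-string>"  # placeholder
    )
    return await blob_storage_download(
        container="my-container",
        blob="reports/latest.csv",
        blob_storage_credentials=creds,
    )
```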
diff --git a/src/integrations/prefect-azure/prefect_azure/credentials.py b/src/integrations/prefect-azure/prefect_azure/credentials.py
index 2a3f776bc26d..3f0ffefbfa24 100644
--- a/src/integrations/prefect-azure/prefect_azure/credentials.py
+++ b/src/integrations/prefect-azure/prefect_azure/credentials.py
@@ -7,7 +7,7 @@
from azure.identity.aio import DefaultAzureCredential as ADefaultAzureCredential
from azure.mgmt.containerinstance import ContainerInstanceManagementClient
from azure.mgmt.resource import ResourceManagementClient
-from pydantic import Field, SecretStr, model_validator
+from pydantic import Field, PrivateAttr, SecretStr, model_validator
try:
from azure.cosmos import CosmosClient
@@ -95,7 +95,8 @@ class AzureBlobStorageCredentials(Block):
_block_type_name = "Azure Blob Storage Credentials"
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/54e3fa7e00197a4fbd1d82ed62494cb58d08c96a-250x250.png" # noqa
- _documentation_url = "https://prefecthq.github.io/prefect-azure/credentials/#prefect_azure.credentials.AzureBlobStorageCredentials" # noqa
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-azure" # noqa
+ _credential: Optional[ADefaultAzureCredential] = PrivateAttr(default=None)
connection_string: Optional[SecretStr] = Field(
default=None,
@@ -162,9 +163,10 @@ async def example_get_client_flow():
```
"""
if self.connection_string is None:
+ self._credential = self._credential or ADefaultAzureCredential()
return BlobServiceClient(
account_url=self.account_url,
- credential=ADefaultAzureCredential(),
+ credential=self._credential,
)
return BlobServiceClient.from_connection_string(
@@ -205,10 +207,11 @@ async def example_get_blob_client_flow():
```
"""
if self.connection_string is None:
+ self._credential = self._credential or ADefaultAzureCredential()
return BlobClient(
account_url=self.account_url,
container_name=container,
- credential=ADefaultAzureCredential(),
+ credential=self._credential,
blob_name=blob,
)
@@ -250,10 +253,11 @@ async def example_get_container_client_flow():
```
"""
if self.connection_string is None:
+ self._credential = self._credential or ADefaultAzureCredential()
return ContainerClient(
account_url=self.account_url,
container_name=container,
- credential=ADefaultAzureCredential(),
+ credential=self._credential,
)
container_client = ContainerClient.from_connection_string(
@@ -261,6 +265,17 @@ async def example_get_container_client_flow():
)
return container_client
+ async def aclose(self):
+ """Cleanup resources."""
+ if self._credential:
+ await self._credential.close()
+
+ async def __aenter__(self):
+ return self
+
+ async def __aexit__(self, exc_type, exc_val, exc_tb):
+ await self.aclose()
+
class AzureCosmosDbCredentials(Block):
"""
@@ -281,7 +296,7 @@ class AzureCosmosDbCredentials(Block):
_block_type_name = "Azure Cosmos DB Credentials"
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/54e3fa7e00197a4fbd1d82ed62494cb58d08c96a-250x250.png" # noqa
- _documentation_url = "https://prefecthq.github.io/prefect-azure/credentials/#prefect_azure.credentials.AzureCosmosDbCredentials" # noqa
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-azure" # noqa
connection_string: SecretStr = Field(
default=..., description="Includes the authorization information required."
@@ -401,7 +416,7 @@ class AzureMlCredentials(Block):
_block_type_name = "AzureML Credentials"
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/54e3fa7e00197a4fbd1d82ed62494cb58d08c96a-250x250.png" # noqa
- _documentation_url = "https://prefecthq.github.io/prefect-azure/credentials/#prefect_azure.credentials.AzureMlCredentials" # noqa
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-azure" # noqa
tenant_id: str = Field(
default=...,
@@ -474,7 +489,7 @@ class AzureContainerInstanceCredentials(Block):
_block_type_name = "Azure Container Instance Credentials"
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/54e3fa7e00197a4fbd1d82ed62494cb58d08c96a-250x250.png" # noqa
- _documentation_url = "https://prefecthq.github.io/prefect-azure/credentials/#prefect_azure.credentials.AzureContainerInstanceCredentials" # noqa
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-azure" # noqa
client_id: Optional[str] = Field(
default=None,
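The `PrivateAttr` above lets the block hand out clients that share one `DefaultAzureCredential`, which `aclose()` shuts down. A sketch of the new context-manager usage (account URL and container name are placeholders):

```python
import asyncio

from prefect_azure.credentials import AzureBlobStorageCredentials


async def list_container() -> list[str]:
    creds = AzureBlobStorageCredentials(
        account_url="https://myaccount.blob.core.windows.net"  # placeholder
    )
    # With no connection string, clients reuse the cached DefaultAzureCredential;
    # __aexit__ closes it via aclose().
    async with creds as credentials:
        async with credentials.get_container_client("my-container") as client:
            return [blob.name async for blob in client.list_blobs()]


asyncio.run(list_container())
```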
diff --git a/src/integrations/prefect-azure/prefect_azure/workers/container_instance.py b/src/integrations/prefect-azure/prefect_azure/workers/container_instance.py
index 81d1b057a49d..3b293db87fe7 100644
--- a/src/integrations/prefect-azure/prefect_azure/workers/container_instance.py
+++ b/src/integrations/prefect-azure/prefect_azure/workers/container_instance.py
@@ -526,9 +526,7 @@ class AzureContainerWorker(BaseWorker):
"Execute flow runs within containers on Azure's Container Instances "
"service. Requires an Azure account."
)
- _documentation_url = (
- "https://prefecthq.github.io/prefect-azure/container_instance_worker/"
- )
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-azure"
async def run(
self,
diff --git a/src/integrations/prefect-azure/pyproject.toml b/src/integrations/prefect-azure/pyproject.toml
index 864b796698e3..60bb213454e3 100644
--- a/src/integrations/prefect-azure/pyproject.toml
+++ b/src/integrations/prefect-azure/pyproject.toml
@@ -27,8 +27,8 @@ dependencies = [
"azure_identity>=1.10",
"azure_mgmt_containerinstance>=10.0",
"azure-mgmt-resource>=21.2",
- "prefect>=3.0.0rc1",
- "setuptools", #required in 3.12 to get pkg_resources (used by azureml.core)
+ "prefect>=3.0.0",
+ "setuptools", #required in 3.12 to get pkg_resources (used by azureml.core)
]
dynamic = ["version"]
@@ -52,7 +52,7 @@ dev = [
"pillow",
"pre-commit",
"pytest-asyncio",
- "pytest",
+ "pytest >= 8.3",
"pytest-env",
"pytest-xdist",
]
@@ -85,7 +85,6 @@ fail_under = 80
show_missing = true
[tool.pytest.ini_options]
+asyncio_default_fixture_loop_scope = "session"
asyncio_mode = "auto"
-env = [
- "PREFECT_TEST_MODE=1",
-]
+env = ["PREFECT_TEST_MODE=1"]
diff --git a/src/integrations/prefect-azure/tests/conftest.py b/src/integrations/prefect-azure/tests/conftest.py
index 21f8fbf8eea9..8efe3bb9b8c1 100644
--- a/src/integrations/prefect-azure/tests/conftest.py
+++ b/src/integrations/prefect-azure/tests/conftest.py
@@ -90,6 +90,9 @@ async def close(self):
@pytest.fixture
def blob_storage_credentials():
blob_storage_credentials = MagicMock()
+ blob_storage_credentials.__aenter__ = AsyncMock(
+ return_value=blob_storage_credentials
+ )
blob_storage_credentials.get_client.side_effect = (
lambda: BlobStorageClientMethodsMock()
)
diff --git a/src/integrations/prefect-bitbucket/pyproject.toml b/src/integrations/prefect-bitbucket/pyproject.toml
index 20ab9b222773..8498167ec1b0 100644
--- a/src/integrations/prefect-bitbucket/pyproject.toml
+++ b/src/integrations/prefect-bitbucket/pyproject.toml
@@ -23,7 +23,7 @@ classifiers = [
"Topic :: Software Development :: Libraries",
]
dependencies = [
- "prefect>=3.0.0rc1",
+ "prefect>=3.0.0",
"pydantic>=2.4",
"atlassian-python-api>=3.32.1,!=3.41.5,!=3.41.6,!=3.41.7,!=3.41.8",
]
@@ -42,7 +42,7 @@ dev = [
"pillow",
"pre-commit",
"pytest-asyncio",
- "pytest",
+ "pytest >= 8.3",
"pytest-env",
"pytest-xdist",
]
@@ -75,7 +75,6 @@ fail_under = 80
show_missing = true
[tool.pytest.ini_options]
+asyncio_default_fixture_loop_scope = "session"
asyncio_mode = "auto"
-env = [
- "PREFECT_TEST_MODE=1",
-]
+env = ["PREFECT_TEST_MODE=1"]
diff --git a/src/integrations/prefect-dask/prefect_dask/task_runners.py b/src/integrations/prefect-dask/prefect_dask/task_runners.py
index f4edbab25295..60066faaf870 100644
--- a/src/integrations/prefect-dask/prefect_dask/task_runners.py
+++ b/src/integrations/prefect-dask/prefect_dask/task_runners.py
@@ -319,7 +319,7 @@ def submit(
self,
task: "Task[P, Coroutine[Any, Any, R]]",
parameters: Dict[str, Any],
- wait_for: Optional[Iterable[PrefectFuture]] = None,
+ wait_for: Optional[Iterable[PrefectDaskFuture[R]]] = None,
dependencies: Optional[Dict[str, Set[TaskRunInput]]] = None,
) -> PrefectDaskFuture[R]:
...
@@ -329,26 +329,26 @@ def submit(
self,
task: "Task[Any, R]",
parameters: Dict[str, Any],
- wait_for: Optional[Iterable[PrefectFuture]] = None,
+ wait_for: Optional[Iterable[PrefectDaskFuture[R]]] = None,
dependencies: Optional[Dict[str, Set[TaskRunInput]]] = None,
) -> PrefectDaskFuture[R]:
...
def submit(
self,
- task: Task,
+ task: "Union[Task[P, R], Task[P, Coroutine[Any, Any, R]]]",
parameters: Dict[str, Any],
- wait_for: Optional[Iterable[PrefectFuture]] = None,
+ wait_for: Optional[Iterable[PrefectDaskFuture[R]]] = None,
dependencies: Optional[Dict[str, Set[TaskRunInput]]] = None,
- ) -> PrefectDaskFuture:
+ ) -> PrefectDaskFuture[R]:
if not self._started:
raise RuntimeError(
"The task runner must be started before submitting work."
)
- # unpack the upstream call in order to cast Prefect futures to Dask futures
- # where possible to optimize Dask task scheduling
+ # Convert both parameters and wait_for futures to Dask futures
parameters = self._optimize_futures(parameters)
+ wait_for = self._optimize_futures(wait_for) if wait_for else None
future = self._client.submit(
task,
@@ -357,7 +357,9 @@ def submit(
dependencies=dependencies,
return_type="state",
)
- return PrefectDaskFuture(wrapped_future=future, task_run_id=future.task_run_id)
+ return PrefectDaskFuture[R](
+ wrapped_future=future, task_run_id=future.task_run_id
+ )
@overload
def map(
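What the `_optimize_futures` change buys, sketched with a local `DaskTaskRunner` (an assumption for illustration): futures passed as parameters and via `wait_for` both become native Dask dependencies.

```python
import time

from prefect import flow, task
from prefect_dask import DaskTaskRunner


@task
def produce() -> int:
    return 42


@task
def consume(value: int) -> int:
    return value + 1


@task
def finalize() -> float:
    return time.time()


@flow(task_runner=DaskTaskRunner())
def pipeline():
    a = produce.submit()
    # The parameter future and the wait_for futures are both cast to Dask
    # futures, so Dask schedules the dependencies natively:
    b = consume.submit(a)
    done = finalize.submit(wait_for=[a, b])
    return b.result(), done.result()
```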
diff --git a/src/integrations/prefect-dask/pyproject.toml b/src/integrations/prefect-dask/pyproject.toml
index 81525fa4c7f6..d76206730ca0 100644
--- a/src/integrations/prefect-dask/pyproject.toml
+++ b/src/integrations/prefect-dask/pyproject.toml
@@ -5,7 +5,7 @@ build-backend = "setuptools.build_meta"
[project]
name = "prefect-dask"
dependencies = [
- "prefect>=3.0.0rc1",
+ "prefect>=3.0.0",
# don't allow versions from 2023.3.2 to 2023.5 (inclusive) due to issue with
# get_client starting in 2023.3.2 (fixed in 2023.6.0)
# https://github.com/dask/distributed/issues/7763
@@ -47,7 +47,7 @@ dev = [
"pillow",
"pre-commit",
"pytest-asyncio >= 0.18.2, != 0.22.0, < 0.23.0",
- "pytest",
+ "pytest >= 8.3",
"pytest-env",
"pytest-xdist",
]
@@ -80,7 +80,6 @@ fail_under = 80
show_missing = true
[tool.pytest.ini_options]
+asyncio_default_fixture_loop_scope = "session"
asyncio_mode = "auto"
-env = [
- "PREFECT_TEST_MODE=1",
-]
+env = ["PREFECT_TEST_MODE=1"]
diff --git a/src/integrations/prefect-dask/tests/conftest.py b/src/integrations/prefect-dask/tests/conftest.py
index 7292781b3329..338bc8fdae28 100644
--- a/src/integrations/prefect-dask/tests/conftest.py
+++ b/src/integrations/prefect-dask/tests/conftest.py
@@ -4,7 +4,6 @@
import pytest
-from prefect.settings import PREFECT_ASYNC_FETCH_STATE_RESULT, temporary_settings
from prefect.testing.utilities import prefect_test_harness
@@ -46,9 +45,3 @@ def event_loop(request):
# Workaround for failures in pytest_asyncio 0.17;
# see https://github.com/pytest-dev/pytest-asyncio/issues/257
policy.set_event_loop(loop)
-
-
-@pytest.fixture(autouse=True)
-def fetch_state_result():
- with temporary_settings(updates={PREFECT_ASYNC_FETCH_STATE_RESULT: True}):
- yield
diff --git a/src/integrations/prefect-dask/tests/test_task_runners.py b/src/integrations/prefect-dask/tests/test_task_runners.py
index 748de299c65e..d1bc5c8bec0b 100644
--- a/src/integrations/prefect-dask/tests/test_task_runners.py
+++ b/src/integrations/prefect-dask/tests/test_task_runners.py
@@ -452,3 +452,49 @@ def umbrella_flow():
return future.result()
assert umbrella_flow() == "nested task"
+
+ def test_state_dependencies_via_wait_for(self, task_runner):
+ @task
+ def task_a():
+ return time.time()
+
+ @task
+ def task_b():
+ return time.time()
+
+ @flow(task_runner=task_runner)
+ def test_flow() -> tuple[float, float]:
+ a = task_a.submit()
+ b = task_b.submit(wait_for=[a])
+ return a.result(), b.result()
+
+ a_time, b_time = test_flow()
+
+ assert b_time > a_time, "task_b timestamp should be after task_a timestamp"
+
+ def test_state_dependencies_via_wait_for_disparate_upstream_tasks(
+ self, task_runner
+ ):
+ @task
+ def task_a():
+ return time.time()
+
+ @task
+ def task_b():
+ return time.time()
+
+ @task
+ def task_c():
+ return time.time()
+
+ @flow(task_runner=task_runner)
+ def test_flow() -> tuple[float, float, float]:
+ a = task_a.submit()
+ b = task_b.submit()
+ c = task_c.submit(wait_for=[a, b])
+
+ return a.result(), b.result(), c.result()
+
+ a_time, b_time, c_time = test_flow()
+
+ assert c_time > a_time and c_time > b_time
diff --git a/src/integrations/prefect-databricks/pyproject.toml b/src/integrations/prefect-databricks/pyproject.toml
index 935c5e724276..3b75338caad7 100644
--- a/src/integrations/prefect-databricks/pyproject.toml
+++ b/src/integrations/prefect-databricks/pyproject.toml
@@ -22,7 +22,7 @@ classifiers = [
"Programming Language :: Python :: 3.12",
"Topic :: Software Development :: Libraries",
]
-dependencies = ["prefect>=3.0.0rc1"]
+dependencies = ["prefect>=3.0.0"]
dynamic = ["version"]
[project.optional-dependencies]
@@ -37,7 +37,7 @@ dev = [
"pillow",
"pre-commit",
"pytest-asyncio",
- "pytest",
+ "pytest >= 8.3",
"pytest-env",
"pytest-xdist",
"respx",
@@ -71,7 +71,6 @@ fail_under = 80
show_missing = true
[tool.pytest.ini_options]
+asyncio_default_fixture_loop_scope = "session"
asyncio_mode = "auto"
-env = [
- "PREFECT_TEST_MODE=1",
-]
+env = ["PREFECT_TEST_MODE=1"]
diff --git a/src/integrations/prefect-dbt/prefect_dbt/__init__.py b/src/integrations/prefect-dbt/prefect_dbt/__init__.py
index f0e463e9bcfd..5e39db25c398 100644
--- a/src/integrations/prefect-dbt/prefect_dbt/__init__.py
+++ b/src/integrations/prefect-dbt/prefect_dbt/__init__.py
@@ -1,6 +1,6 @@
from . import _version
-from .cloud import DbtCloudCredentials # noqa
+from .cloud import DbtCloudCredentials, DbtCloudJob # noqa
from .cli import ( # noqa
DbtCliProfile,
GlobalConfigs,
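With this re-export, both blocks resolve from the package root. A sketch (credential values are placeholders):

```python
from prefect_dbt import DbtCloudCredentials, DbtCloudJob

creds = DbtCloudCredentials(api_key="<api-key>", account_id=12345)  # placeholders
job = DbtCloudJob(dbt_cloud_credentials=creds, job_id=67890)
```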
diff --git a/src/integrations/prefect-dbt/prefect_dbt/cli/commands.py b/src/integrations/prefect-dbt/prefect_dbt/cli/commands.py
index 2193de75f037..f7da28a31fe3 100644
--- a/src/integrations/prefect-dbt/prefect_dbt/cli/commands.py
+++ b/src/integrations/prefect-dbt/prefect_dbt/cli/commands.py
@@ -290,7 +290,7 @@ class DbtCoreOperation(ShellOperation):
_block_type_name = "dbt Core Operation"
_logo_url = "https://images.ctfassets.net/gm98wzqotmnx/5zE9lxfzBHjw3tnEup4wWL/9a001902ed43a84c6c96d23b24622e19/dbt-bit_tm.png?h=250" # noqa
- _documentation_url = "https://prefecthq.github.io/prefect-dbt/cli/commands/#prefect_dbt.cli.commands.DbtCoreOperation" # noqa
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-dbt" # noqa
profiles_dir: Optional[Path] = Field(
default=None,
@@ -929,9 +929,11 @@ def _create_unsuccessful_markdown(run_results: dict) -> str:
n.node.resource_type,
n.message,
n.node.path,
- n.node.compiled_code
- if n.node.resource_type not in ["seed", "source"]
- else None,
+ (
+ n.node.compiled_code
+ if n.node.resource_type not in ["seed", "source"]
+ else None
+ ),
)
if len(run_results["Fail"]) > 0:
markdown += "\n### Failed Nodes:\n"
@@ -941,9 +943,11 @@ def _create_unsuccessful_markdown(run_results: dict) -> str:
n.node.resource_type,
n.message,
n.node.path,
- n.node.compiled_code
- if n.node.resource_type not in ["seed", "source"]
- else None,
+ (
+ n.node.compiled_code
+ if n.node.resource_type not in ["seed", "source"]
+ else None
+ ),
)
if len(run_results["Skipped"]) > 0:
markdown += "\n### Skipped Nodes:\n"
@@ -953,9 +957,11 @@ def _create_unsuccessful_markdown(run_results: dict) -> str:
n.node.resource_type,
n.message,
n.node.path,
- n.node.compiled_code
- if n.node.resource_type not in ["seed", "source"]
- else None,
+ (
+ n.node.compiled_code
+ if n.node.resource_type not in ["seed", "source"]
+ else None
+ ),
)
if len(run_results["Warn"]) > 0:
markdown += "\n### Warned Nodes:\n"
@@ -965,9 +971,11 @@ def _create_unsuccessful_markdown(run_results: dict) -> str:
n.node.resource_type,
n.message,
n.node.path,
- n.node.compiled_code
- if n.node.resource_type not in ["seed", "source"]
- else None,
+ (
+ n.node.compiled_code
+ if n.node.resource_type not in ["seed", "source"]
+ else None
+ ),
)
return markdown
diff --git a/src/integrations/prefect-dbt/prefect_dbt/cli/configs/base.py b/src/integrations/prefect-dbt/prefect_dbt/cli/configs/base.py
index 1028973e55c1..4aff4a005c1c 100644
--- a/src/integrations/prefect-dbt/prefect_dbt/cli/configs/base.py
+++ b/src/integrations/prefect-dbt/prefect_dbt/cli/configs/base.py
@@ -34,7 +34,7 @@ class DbtConfigs(Block, abc.ABC):
"fields provided in extras and credentials."
),
)
- _documentation_url = "https://prefecthq.github.io/prefect-dbt/cli/configs/base/#prefect_dbt.cli.configs.base.DbtConfigs" # noqa
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-dbt" # noqa
def _populate_configs_json(
self,
@@ -145,7 +145,7 @@ class TargetConfigs(BaseTargetConfigs):
_block_type_name = "dbt CLI Target Configs"
_logo_url = "https://images.ctfassets.net/gm98wzqotmnx/5zE9lxfzBHjw3tnEup4wWL/9a001902ed43a84c6c96d23b24622e19/dbt-bit_tm.png?h=250" # noqa
- _documentation_url = "https://prefecthq.github.io/prefect-dbt/cli/configs/base/#prefect_dbt.cli.configs.base.TargetConfigs" # noqa
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-dbt" # noqa
class GlobalConfigs(DbtConfigs):
@@ -189,7 +189,7 @@ class GlobalConfigs(DbtConfigs):
_block_type_name = "dbt CLI Global Configs"
_logo_url = "https://images.ctfassets.net/gm98wzqotmnx/5zE9lxfzBHjw3tnEup4wWL/9a001902ed43a84c6c96d23b24622e19/dbt-bit_tm.png?h=250" # noqa
- _documentation_url = "https://prefecthq.github.io/prefect-dbt/cli/configs/base/#prefect_dbt.cli.configs.base.GlobalConfigs" # noqa
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-dbt" # noqa
send_anonymous_usage_stats: Optional[bool] = Field(
default=None,
diff --git a/src/integrations/prefect-dbt/prefect_dbt/cli/configs/bigquery.py b/src/integrations/prefect-dbt/prefect_dbt/cli/configs/bigquery.py
index 27eb98701f84..e799f674496c 100644
--- a/src/integrations/prefect-dbt/prefect_dbt/cli/configs/bigquery.py
+++ b/src/integrations/prefect-dbt/prefect_dbt/cli/configs/bigquery.py
@@ -56,7 +56,7 @@ class BigQueryTargetConfigs(BaseTargetConfigs):
_block_type_name = "dbt CLI BigQuery Target Configs"
_logo_url = "https://images.ctfassets.net/gm98wzqotmnx/5zE9lxfzBHjw3tnEup4wWL/9a001902ed43a84c6c96d23b24622e19/dbt-bit_tm.png?h=250" # noqa
_description = "dbt CLI target configs containing credentials and settings, specific to BigQuery." # noqa
- _documentation_url = "https://prefecthq.github.io/prefect-dbt/cli/configs/bigquery/#prefect_dbt.cli.configs.bigquery.BigQueryTargetConfigs" # noqa
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-dbt" # noqa
type: Literal["bigquery"] = Field(
default="bigquery", description="The type of target."
diff --git a/src/integrations/prefect-dbt/prefect_dbt/cli/configs/postgres.py b/src/integrations/prefect-dbt/prefect_dbt/cli/configs/postgres.py
index 19561ee25de2..a207465840d1 100644
--- a/src/integrations/prefect-dbt/prefect_dbt/cli/configs/postgres.py
+++ b/src/integrations/prefect-dbt/prefect_dbt/cli/configs/postgres.py
@@ -30,7 +30,7 @@ class PostgresTargetConfigs(BaseTargetConfigs):
_block_type_name = "dbt CLI Postgres Target Configs"
_logo_url = "https://images.ctfassets.net/gm98wzqotmnx/5zE9lxfzBHjw3tnEup4wWL/9a001902ed43a84c6c96d23b24622e19/dbt-bit_tm.png?h=250" # noqa
_description = "dbt CLI target configs containing credentials and settings specific to Postgres." # noqa
- _documentation_url = "https://prefecthq.github.io/prefect-dbt/cli/configs/postgres/#prefect_dbt.cli.configs.postgres.PostgresTargetConfigs" # noqa
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-dbt" # noqa
type: Literal["postgres"] = Field(
default="postgres", description="The type of the target."
diff --git a/src/integrations/prefect-dbt/prefect_dbt/cli/configs/snowflake.py b/src/integrations/prefect-dbt/prefect_dbt/cli/configs/snowflake.py
index 124e5bd62ca9..3398f54a1bfd 100644
--- a/src/integrations/prefect-dbt/prefect_dbt/cli/configs/snowflake.py
+++ b/src/integrations/prefect-dbt/prefect_dbt/cli/configs/snowflake.py
@@ -64,7 +64,7 @@ class SnowflakeTargetConfigs(BaseTargetConfigs):
_block_type_name = "dbt CLI Snowflake Target Configs"
_logo_url = "https://images.ctfassets.net/gm98wzqotmnx/5zE9lxfzBHjw3tnEup4wWL/9a001902ed43a84c6c96d23b24622e19/dbt-bit_tm.png?h=250" # noqa
- _documentation_url = "https://prefecthq.github.io/prefect-dbt/cli/configs/snowflake/#prefect_dbt.cli.configs.snowflake.SnowflakeTargetConfigs" # noqa
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-dbt" # noqa
type: Literal["snowflake"] = Field(
default="snowflake", description="The type of the target configs."
diff --git a/src/integrations/prefect-dbt/prefect_dbt/cli/credentials.py b/src/integrations/prefect-dbt/prefect_dbt/cli/credentials.py
index c6bfe0077162..5fa4e9cd9163 100644
--- a/src/integrations/prefect-dbt/prefect_dbt/cli/credentials.py
+++ b/src/integrations/prefect-dbt/prefect_dbt/cli/credentials.py
@@ -108,7 +108,7 @@ class DbtCliProfile(Block):
_block_type_name = "dbt CLI Profile"
_logo_url = "https://images.ctfassets.net/gm98wzqotmnx/5zE9lxfzBHjw3tnEup4wWL/9a001902ed43a84c6c96d23b24622e19/dbt-bit_tm.png?h=250" # noqa
- _documentation_url = "https://prefecthq.github.io/prefect-dbt/cli/credentials/#prefect_dbt.cli.credentials.DbtCliProfile" # noqa
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-dbt" # noqa
name: str = Field(
default=..., description="Profile name used for populating profiles.yml."
diff --git a/src/integrations/prefect-dbt/prefect_dbt/cloud/credentials.py b/src/integrations/prefect-dbt/prefect_dbt/cloud/credentials.py
index a4a756207634..82d4eb179efa 100644
--- a/src/integrations/prefect-dbt/prefect_dbt/cloud/credentials.py
+++ b/src/integrations/prefect-dbt/prefect_dbt/cloud/credentials.py
@@ -61,7 +61,7 @@ def trigger_dbt_cloud_job_run_flow():
_block_type_name = "dbt Cloud Credentials"
_logo_url = "https://images.ctfassets.net/gm98wzqotmnx/5zE9lxfzBHjw3tnEup4wWL/9a001902ed43a84c6c96d23b24622e19/dbt-bit_tm.png?h=250" # noqa
- _documentation_url = "https://prefecthq.github.io/prefect-dbt/cloud/credentials/#prefect_dbt.cloud.credentials.DbtCloudCredentials" # noqa
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-dbt" # noqa
api_key: SecretStr = Field(
default=...,
diff --git a/src/integrations/prefect-dbt/prefect_dbt/cloud/jobs.py b/src/integrations/prefect-dbt/prefect_dbt/cloud/jobs.py
index d1694f3d74bc..b493cc1e3bd9 100644
--- a/src/integrations/prefect-dbt/prefect_dbt/cloud/jobs.py
+++ b/src/integrations/prefect-dbt/prefect_dbt/cloud/jobs.py
@@ -1006,7 +1006,7 @@ def dbt_cloud_job_flow():
_block_type_name = "dbt Cloud Job"
_logo_url = "https://images.ctfassets.net/gm98wzqotmnx/5zE9lxfzBHjw3tnEup4wWL/9a001902ed43a84c6c96d23b24622e19/dbt-bit_tm.png?h=250" # noqa
- _documentation_url = "https://prefecthq.github.io/prefect-dbt/cloud/jobs/#prefect_dbt.cloud.jobs.DbtCloudJob" # noqa
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-dbt" # noqa
dbt_cloud_credentials: DbtCloudCredentials = Field(
default=...,
diff --git a/src/integrations/prefect-dbt/pyproject.toml b/src/integrations/prefect-dbt/pyproject.toml
index 64b15b1a9fd5..32d6b9bd1d98 100644
--- a/src/integrations/prefect-dbt/pyproject.toml
+++ b/src/integrations/prefect-dbt/pyproject.toml
@@ -23,23 +23,23 @@ classifiers = [
"Topic :: Software Development :: Libraries",
]
dependencies = [
- "prefect>=3.0.0rc1",
+ "prefect>=3.0.0",
"dbt-core>=1.7.0",
- "prefect_shell>=0.3.0rc1",
+ "prefect_shell>=0.3.0",
"sgqlc>=16.0.0",
]
dynamic = ["version"]
[project.optional-dependencies]
-snowflake = ["prefect-snowflake>=0.28.0rc1", "dbt-snowflake"]
-bigquery = ["prefect-gcp[bigquery]>=0.6.0rc1", "dbt-bigquery"]
+snowflake = ["prefect-snowflake>=0.28.0", "dbt-snowflake"]
+bigquery = ["prefect-gcp[bigquery]>=0.6.0", "dbt-bigquery"]
postgres = ["prefect-sqlalchemy>=0.5.1", "dbt-postgres"]
all_extras = [
"dbt-bigquery",
"dbt-postgres",
"dbt-snowflake",
- "prefect-gcp[bigquery]>=0.6.0rc1",
- "prefect-snowflake>=0.28.0rc1",
+ "prefect-gcp[bigquery]>=0.6.0",
+ "prefect-snowflake>=0.28.0",
"prefect-sqlalchemy>=0.5.1",
]
dev = [
@@ -55,10 +55,10 @@ dev = [
"mypy",
"pillow",
"pre-commit",
- "prefect-gcp[bigquery]>=0.6.0rc1",
- "prefect-snowflake>=0.28.0rc1",
+ "prefect-gcp[bigquery]>=0.6.0",
+ "prefect-snowflake>=0.28.0",
"prefect-sqlalchemy>=0.5.1",
- "pytest",
+ "pytest >= 8.3",
"pytest-asyncio",
"pytest-env",
"pytest-xdist",
@@ -93,7 +93,6 @@ fail_under = 80
show_missing = true
[tool.pytest.ini_options]
+asyncio_default_fixture_loop_scope = "session"
asyncio_mode = "auto"
-env = [
- "PREFECT_TEST_MODE=1",
-]
+env = ["PREFECT_TEST_MODE=1"]
diff --git a/src/integrations/prefect-docker/prefect_docker/worker.py b/src/integrations/prefect-docker/prefect_docker/worker.py
index e529b123375d..24a57631046b 100644
--- a/src/integrations/prefect-docker/prefect_docker/worker.py
+++ b/src/integrations/prefect-docker/prefect_docker/worker.py
@@ -396,7 +396,7 @@ class DockerWorker(BaseWorker):
"Docker daemon."
)
_display_name = "Docker"
- _documentation_url = "https://prefecthq.github.io/prefect-docker/worker/"
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-docker"
_logo_url = "https://images.ctfassets.net/gm98wzqotmnx/2IfXXfMq66mrzJBDFFCHTp/6d8f320d9e4fc4393f045673d61ab612/Moby-logo.png?h=250" # noqa
def __init__(
diff --git a/src/integrations/prefect-docker/pyproject.toml b/src/integrations/prefect-docker/pyproject.toml
index db89b60136cd..e947a46f52f9 100644
--- a/src/integrations/prefect-docker/pyproject.toml
+++ b/src/integrations/prefect-docker/pyproject.toml
@@ -22,11 +22,7 @@ classifiers = [
"Programming Language :: Python :: 3.12",
"Topic :: Software Development :: Libraries",
]
-dependencies = [
- "prefect>=3.0.0rc1",
- "docker>=6.1.1",
- "exceptiongroup",
-]
+dependencies = ["prefect>=3.0.0", "docker>=6.1.1", "exceptiongroup"]
dynamic = ["version"]
[project.optional-dependencies]
@@ -41,7 +37,7 @@ dev = [
"pillow",
"pre-commit",
"pytest-asyncio",
- "pytest",
+ "pytest >= 8.3",
"pytest-env",
"pytest-xdist",
]
@@ -74,6 +70,7 @@ fail_under = 80
show_missing = true
[tool.pytest.ini_options]
+asyncio_default_fixture_loop_scope = "session"
asyncio_mode = "auto"
env = ["PREFECT_TEST_MODE=1"]
filterwarnings = [
diff --git a/src/integrations/prefect-email/prefect_email/credentials.py b/src/integrations/prefect-email/prefect_email/credentials.py
index fa9e24db0484..1280d161dec1 100644
--- a/src/integrations/prefect-email/prefect_email/credentials.py
+++ b/src/integrations/prefect-email/prefect_email/credentials.py
@@ -97,7 +97,7 @@ class EmailServerCredentials(Block):
_block_type_name = "Email Server Credentials"
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/82bc6ed16ca42a2252a5512c72233a253b8a58eb-250x250.png" # noqa
- _documentation_url = "https://prefecthq.github.io/prefect-email/credentials/#prefect_email.credentials.EmailServerCredentials" # noqa
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-email" # noqa
username: Optional[str] = Field(
default=None,
diff --git a/src/integrations/prefect-email/pyproject.toml b/src/integrations/prefect-email/pyproject.toml
index 1cce0fee97e4..8a3c67d19fbe 100644
--- a/src/integrations/prefect-email/pyproject.toml
+++ b/src/integrations/prefect-email/pyproject.toml
@@ -22,7 +22,7 @@ classifiers = [
"Programming Language :: Python :: 3.12",
"Topic :: Software Development :: Libraries",
]
-dependencies = ["prefect>=3.0.0rc1"]
+dependencies = ["prefect>=3.0.0"]
dynamic = ["version"]
[project.optional-dependencies]
@@ -38,7 +38,7 @@ dev = [
"pillow",
"pre-commit",
"pytest-asyncio",
- "pytest",
+ "pytest >= 8.3",
"pytest-env",
"pytest-xdist",
]
@@ -71,7 +71,6 @@ fail_under = 80
show_missing = true
[tool.pytest.ini_options]
+asyncio_default_fixture_loop_scope = "session"
asyncio_mode = "auto"
-env = [
- "PREFECT_TEST_MODE=1",
-]
+env = ["PREFECT_TEST_MODE=1"]
diff --git a/src/integrations/prefect-gcp/README.md b/src/integrations/prefect-gcp/README.md
index 586fd4e082d7..f8e76ae31397 100644
--- a/src/integrations/prefect-gcp/README.md
+++ b/src/integrations/prefect-gcp/README.md
@@ -9,18 +9,12 @@
`prefect-gcp` makes it easy to leverage the capabilities of Google Cloud Platform (GCP) in your flows, featuring support for Vertex AI, Cloud Run, BigQuery, Cloud Storage, and Secret Manager.
-Visit the full docs [here](https://PrefectHQ.github.io/prefect-gcp).
+Visit the full docs [here](https://docs.prefect.io/integrations/prefect-gcp).
-### Installation
+## Installation
To start using `prefect-gcp`:
```bash
pip install prefect-gcp
```
-
-To install extras, see [here](https://prefecthq.github.io/prefect-gcp/#installation).
-
-### Contributing
-
-Thanks for thinking about chipping in! Check out this [step-by-step guide](https://prefecthq.github.io/prefect-gcp/#installation) on how to get started.
diff --git a/src/integrations/prefect-gcp/prefect_gcp/bigquery.py b/src/integrations/prefect-gcp/prefect_gcp/bigquery.py
index 5e0c3522ada9..0b69c0496838 100644
--- a/src/integrations/prefect-gcp/prefect_gcp/bigquery.py
+++ b/src/integrations/prefect-gcp/prefect_gcp/bigquery.py
@@ -560,7 +560,7 @@ class BigQueryWarehouse(DatabaseBlock):
_block_type_name = "BigQuery Warehouse"
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/10424e311932e31c477ac2b9ef3d53cefbaad708-250x250.png" # noqa
- _documentation_url = "https://prefecthq.github.io/prefect-gcp/bigquery/#prefect_gcp.bigquery.BigQueryWarehouse" # noqa: E501
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-gcp" # noqa: E501
gcp_credentials: GcpCredentials
fetch_size: int = Field(
diff --git a/src/integrations/prefect-gcp/prefect_gcp/cloud_storage.py b/src/integrations/prefect-gcp/prefect_gcp/cloud_storage.py
index 68ea9bdbace0..b7b6183c5e8c 100644
--- a/src/integrations/prefect-gcp/prefect_gcp/cloud_storage.py
+++ b/src/integrations/prefect-gcp/prefect_gcp/cloud_storage.py
@@ -581,7 +581,7 @@ class GcsBucket(WritableDeploymentStorage, WritableFileSystem, ObjectStorageBloc
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/10424e311932e31c477ac2b9ef3d53cefbaad708-250x250.png" # noqa
_block_type_name = "GCS Bucket"
- _documentation_url = "https://prefecthq.github.io/prefect-gcp/cloud_storage/#prefect_gcp.cloud_storage.GcsBucket" # noqa: E501
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-gcp" # noqa: E501
bucket: str = Field(..., description="Name of the bucket.")
gcp_credentials: GcpCredentials = Field(
diff --git a/src/integrations/prefect-gcp/prefect_gcp/credentials.py b/src/integrations/prefect-gcp/prefect_gcp/credentials.py
index 33703fe0cf72..45165c8c63e8 100644
--- a/src/integrations/prefect-gcp/prefect_gcp/credentials.py
+++ b/src/integrations/prefect-gcp/prefect_gcp/credentials.py
@@ -124,7 +124,7 @@ class GcpCredentials(CredentialsBlock):
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/10424e311932e31c477ac2b9ef3d53cefbaad708-250x250.png" # noqa
_block_type_name = "GCP Credentials"
- _documentation_url = "https://prefecthq.github.io/prefect-gcp/credentials/#prefect_gcp.credentials.GcpCredentials" # noqa: E501
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-gcp" # noqa: E501
service_account_file: Optional[Path] = Field(
default=None, description="Path to the service account JSON keyfile."
diff --git a/src/integrations/prefect-gcp/prefect_gcp/secret_manager.py b/src/integrations/prefect-gcp/prefect_gcp/secret_manager.py
index 6f90f8f4ec98..edd90a90f337 100644
--- a/src/integrations/prefect-gcp/prefect_gcp/secret_manager.py
+++ b/src/integrations/prefect-gcp/prefect_gcp/secret_manager.py
@@ -307,7 +307,7 @@ class GcpSecret(SecretBlock):
"""
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/10424e311932e31c477ac2b9ef3d53cefbaad708-250x250.png" # noqa
- _documentation_url = "https://prefecthq.github.io/prefect-gcp/secret_manager/#prefect_gcp.secret_manager.GcpSecret" # noqa: E501
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-gcp" # noqa: E501
gcp_credentials: GcpCredentials
secret_name: str = Field(default=..., description="Name of the secret to manage.")
diff --git a/src/integrations/prefect-gcp/prefect_gcp/workers/cloud_run.py b/src/integrations/prefect-gcp/prefect_gcp/workers/cloud_run.py
index bc20017ee06e..298386ef168d 100644
--- a/src/integrations/prefect-gcp/prefect_gcp/workers/cloud_run.py
+++ b/src/integrations/prefect-gcp/prefect_gcp/workers/cloud_run.py
@@ -541,7 +541,7 @@ class CloudRunWorker(BaseWorker):
"a Google Cloud Platform account."
)
_display_name = "Google Cloud Run"
- _documentation_url = "https://prefecthq.github.io/prefect-gcp/cloud_run_worker/"
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-gcp"
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/10424e311932e31c477ac2b9ef3d53cefbaad708-250x250.png" # noqa
def _create_job_error(self, exc, configuration):
diff --git a/src/integrations/prefect-gcp/prefect_gcp/workers/cloud_run_v2.py b/src/integrations/prefect-gcp/prefect_gcp/workers/cloud_run_v2.py
index c7836d324695..0d3b6989378a 100644
--- a/src/integrations/prefect-gcp/prefect_gcp/workers/cloud_run_v2.py
+++ b/src/integrations/prefect-gcp/prefect_gcp/workers/cloud_run_v2.py
@@ -211,7 +211,7 @@ def _populate_env(self):
]
envs.extend(envs_from_secrets)
- self.job_body["template"]["template"]["containers"][0]["env"] = envs
+ self.job_body["template"]["template"]["containers"][0]["env"].extend(envs)
def _configure_cloudsql_volumes(self):
"""
@@ -226,7 +226,10 @@ def _configure_cloudsql_volumes(self):
if "volumes" not in template:
template["volumes"] = []
template["volumes"].append(
- {"name": "cloudsql", "cloudSqlInstance": self.cloudsql_instances}
+ {
+ "name": "cloudsql",
+ "cloudSqlInstance": {"instances": self.cloudsql_instances},
+ }
)
if "volumeMounts" not in containers[0]:
containers[0]["volumeMounts"] = []
@@ -469,7 +472,7 @@ class CloudRunWorkerV2(BaseWorker):
job_configuration_variables = CloudRunWorkerV2Variables
_description = "Execute flow runs within containers on Google Cloud Run (V2 API). Requires a Google Cloud Platform account." # noqa
_display_name = "Google Cloud Run V2"
- _documentation_url = "https://prefecthq.github.io/prefect-gcp/worker_v2/"
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-gcp"
_logo_url = "https://images.ctfassets.net/gm98wzqotmnx/4SpnOBvMYkHp6z939MDKP6/549a91bc1ce9afd4fb12c68db7b68106/social-icon-google-cloud-1200-630.png?h=250" # noqa
async def run(
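Two behavioral fixes land in `cloud_run_v2.py` above: `_populate_env` now extends the container's existing `env` list instead of overwriting it, and `_configure_cloudsql_volumes` emits the nested `{"instances": [...]}` shape the v2 API expects. A hedged sketch of the resulting job-body fragment (plain dicts, illustrative values):

```python
job_body = {
    "template": {
        "template": {
            "containers": [{"env": [{"name": "ENV0", "value": "VALUE0"}]}],
            "volumes": [],
        }
    }
}

# User-supplied env vars survive; worker-managed ones are appended.
job_body["template"]["template"]["containers"][0]["env"].extend(
    [{"name": "PREFECT_API_URL", "value": "https://example.invalid/api"}]
)

# Cloud SQL volumes now use the v2 schema's nested "instances" list.
job_body["template"]["template"]["volumes"].append(
    {
        "name": "cloudsql",
        "cloudSqlInstance": {"instances": ["project:region:instance1"]},
    }
)
```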
diff --git a/src/integrations/prefect-gcp/prefect_gcp/workers/vertex.py b/src/integrations/prefect-gcp/prefect_gcp/workers/vertex.py
index add6ec12798a..d11efff4c3d0 100644
--- a/src/integrations/prefect-gcp/prefect_gcp/workers/vertex.py
+++ b/src/integrations/prefect-gcp/prefect_gcp/workers/vertex.py
@@ -372,7 +372,7 @@ class VertexAIWorker(BaseWorker):
"a Google Cloud Platform account."
)
_display_name = "Google Vertex AI"
- _documentation_url = "https://prefecthq.github.io/prefect-gcp/vertex_worker/"
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-gcp" # noqa
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/10424e311932e31c477ac2b9ef3d53cefbaad708-250x250.png" # noqa
async def run(
diff --git a/src/integrations/prefect-gcp/pyproject.toml b/src/integrations/prefect-gcp/pyproject.toml
index 69f90adbd206..21c9e5b1e855 100644
--- a/src/integrations/prefect-gcp/pyproject.toml
+++ b/src/integrations/prefect-gcp/pyproject.toml
@@ -23,7 +23,7 @@ classifiers = [
"Topic :: Software Development :: Libraries",
]
dependencies = [
- "prefect>=3.0.0rc1",
+ "prefect>=3.0.0",
"google-api-python-client>=2.20.0",
"google-cloud-storage>=2.0.0",
"tenacity>=8.0.0",
@@ -61,7 +61,7 @@ dev = [
"pillow",
"pre-commit",
"pyarrow",
- "pytest",
+ "pytest >= 8.3",
"pytest-asyncio",
"pytest-env",
"pytest-xdist",
@@ -95,10 +95,9 @@ fail_under = 80
show_missing = true
[tool.pytest.ini_options]
+asyncio_default_fixture_loop_scope = "session"
asyncio_mode = "auto"
-env = [
- "PREFECT_TEST_MODE=1",
-]
+env = ["PREFECT_TEST_MODE=1"]
filterwarnings = [
"ignore:Type google._upb._message.* uses PyType_Spec with a metaclass that has custom tp_new. This is deprecated and will no longer be allowed in Python 3.14:DeprecationWarning",
]
diff --git a/src/integrations/prefect-gcp/tests/test_cloud_run_worker_v2.py b/src/integrations/prefect-gcp/tests/test_cloud_run_worker_v2.py
index 597edd0352a0..5b422df5c6be 100644
--- a/src/integrations/prefect-gcp/tests/test_cloud_run_worker_v2.py
+++ b/src/integrations/prefect-gcp/tests/test_cloud_run_worker_v2.py
@@ -120,6 +120,29 @@ def test_populate_env_with_secrets(self, cloud_run_worker_v2_job_config):
},
]
+ def test_populate_env_with_existing_envs(self, cloud_run_worker_v2_job_config):
+ cloud_run_worker_v2_job_config.job_body["template"]["template"]["containers"][
+ 0
+ ]["env"] = [{"name": "ENV0", "value": "VALUE0"}]
+ cloud_run_worker_v2_job_config.env_from_secrets = {
+ "SECRET_ENV1": SecretKeySelector(secret="SECRET1", version="latest")
+ }
+ cloud_run_worker_v2_job_config._populate_env()
+
+ assert cloud_run_worker_v2_job_config.job_body["template"]["template"][
+ "containers"
+ ][0]["env"] == [
+ {"name": "ENV0", "value": "VALUE0"},
+ {"name": "ENV1", "value": "VALUE1"},
+ {"name": "ENV2", "value": "VALUE2"},
+ {
+ "name": "SECRET_ENV1",
+ "valueSource": {
+ "secretKeyRef": {"secret": "SECRET1", "version": "latest"}
+ },
+ },
+ ]
+
def test_populate_image_if_not_present(self, cloud_run_worker_v2_job_config):
cloud_run_worker_v2_job_config._populate_image_if_not_present()
@@ -231,7 +254,7 @@ def test_configure_cloudsql_volumes_preserves_existing_volumes(
assert template["volumes"][0] == {"name": "existing-volume", "emptyDir": {}}
assert template["volumes"][1] == {
"name": "cloudsql",
- "cloudSqlInstance": ["project:region:instance1"],
+ "cloudSqlInstance": {"instances": ["project:region:instance1"]},
}
assert len(template["containers"][0]["volumeMounts"]) == 2
@@ -261,7 +284,7 @@ class MockFlowRun:
assert any(
vol["name"] == "cloudsql"
- and vol["cloudSqlInstance"] == ["project:region:instance1"]
+ and vol["cloudSqlInstance"]["instances"] == ["project:region:instance1"]
for vol in template["volumes"]
)
assert any(
diff --git a/src/integrations/prefect-github/prefect_github/credentials.py b/src/integrations/prefect-github/prefect_github/credentials.py
index 24fe819be558..1e5971a6e9d3 100644
--- a/src/integrations/prefect-github/prefect_github/credentials.py
+++ b/src/integrations/prefect-github/prefect_github/credentials.py
@@ -25,7 +25,7 @@ class GitHubCredentials(CredentialsBlock):
_block_type_name = "GitHub Credentials"
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/41971cfecfea5f79ff334164f06ecb34d1038dd4-250x250.png" # noqa
- _documentation_url = "https://prefecthq.github.io/prefect-github/credentials/#prefect_github.credentials.GitHubCredentials" # noqa
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-github"
token: Optional[SecretStr] = Field(
default=None, description="A GitHub personal access token (PAT)."
diff --git a/src/integrations/prefect-github/prefect_github/repository.py b/src/integrations/prefect-github/prefect_github/repository.py
index 4742bc2d8117..57551f2909f3 100644
--- a/src/integrations/prefect-github/prefect_github/repository.py
+++ b/src/integrations/prefect-github/prefect_github/repository.py
@@ -39,7 +39,7 @@ class GitHubRepository(ReadableDeploymentStorage):
_block_type_name = "GitHub Repository"
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/41971cfecfea5f79ff334164f06ecb34d1038dd4-250x250.png" # noqa: E501
- _documentation_url = "https://prefecthq.github.io/prefect-github/repository/#prefect_github.repository.GitHubRepository" # noqa
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-github" # noqa
repository_url: str = Field(
default=...,
diff --git a/src/integrations/prefect-github/pyproject.toml b/src/integrations/prefect-github/pyproject.toml
index c7ded155c8ce..3d5259993096 100644
--- a/src/integrations/prefect-github/pyproject.toml
+++ b/src/integrations/prefect-github/pyproject.toml
@@ -4,10 +4,7 @@ build-backend = "setuptools.build_meta"
[project]
name = "prefect-github"
-dependencies = [
- "sgqlc>=15.0",
- "prefect>=3.0.0rc1",
-]
+dependencies = ["sgqlc>=15.0", "prefect>=3.0.0"]
dynamic = ["version"]
description = "Prefect integrations interacting with GitHub"
readme = "README.md"
@@ -40,7 +37,7 @@ dev = [
"pillow",
"pre-commit",
"pytest-asyncio",
- "pytest",
+ "pytest >= 8.3",
"pytest-env",
"pytest-xdist",
]
@@ -73,7 +70,6 @@ fail_under = 80
show_missing = true
[tool.pytest.ini_options]
+asyncio_default_fixture_loop_scope = "session"
asyncio_mode = "auto"
-env = [
- "PREFECT_TEST_MODE=1",
-]
+env = ["PREFECT_TEST_MODE=1"]
diff --git a/src/integrations/prefect-gitlab/pyproject.toml b/src/integrations/prefect-gitlab/pyproject.toml
index 2e57871fd70d..516b1af20e5a 100644
--- a/src/integrations/prefect-gitlab/pyproject.toml
+++ b/src/integrations/prefect-gitlab/pyproject.toml
@@ -22,11 +22,7 @@ classifiers = [
"Programming Language :: Python :: 3.12",
"Topic :: Software Development :: Libraries",
]
-dependencies = [
- "prefect>=3.0.0rc1",
- "python-gitlab>=3.12.0",
- "tenacity>=8.2.3",
-]
+dependencies = ["prefect>=3.0.0", "python-gitlab>=3.12.0", "tenacity>=8.2.3"]
dynamic = ["version"]
[project.optional-dependencies]
@@ -42,7 +38,7 @@ dev = [
"pillow",
"pre-commit",
"pytest-asyncio",
- "pytest",
+ "pytest >= 8.3",
"pytest-env",
"pytest-xdist",
]
@@ -75,7 +71,6 @@ fail_under = 80
show_missing = true
[tool.pytest.ini_options]
+asyncio_default_fixture_loop_scope = "session"
asyncio_mode = "auto"
-env = [
- "PREFECT_TEST_MODE=1",
-]
+env = ["PREFECT_TEST_MODE=1"]
diff --git a/src/integrations/prefect-kubernetes/prefect_kubernetes/credentials.py b/src/integrations/prefect-kubernetes/prefect_kubernetes/credentials.py
index 802edc9b5007..d753317824d2 100644
--- a/src/integrations/prefect-kubernetes/prefect_kubernetes/credentials.py
+++ b/src/integrations/prefect-kubernetes/prefect_kubernetes/credentials.py
@@ -52,7 +52,7 @@ class KubernetesClusterConfig(Block):
_block_type_name = "Kubernetes Cluster Config"
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/2d0b896006ad463b49c28aaac14f31e00e32cfab-250x250.png"
- _documentation_url = "https://prefecthq.github.io/prefect-kubernetes/credentials/#prefect_kubernetes.credentials.KubernetesClusterConfig" # noqa
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-kubernetes" # noqa
config: Dict = Field(
default=..., description="The entire contents of a kubectl config file."
)
@@ -142,7 +142,7 @@ class KubernetesCredentials(Block):
_block_type_name = "Kubernetes Credentials"
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/2d0b896006ad463b49c28aaac14f31e00e32cfab-250x250.png" # noqa
- _documentation_url = "https://prefecthq.github.io/prefect-kubernetes/credentials/#prefect_kubernetes.credentials.KubernetesCredentials" # noqa
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-kubernetes" # noqa
cluster_config: Optional[KubernetesClusterConfig] = None
diff --git a/src/integrations/prefect-kubernetes/prefect_kubernetes/jobs.py b/src/integrations/prefect-kubernetes/prefect_kubernetes/jobs.py
index d3b9ec828429..5650ca551f7b 100644
--- a/src/integrations/prefect-kubernetes/prefect_kubernetes/jobs.py
+++ b/src/integrations/prefect-kubernetes/prefect_kubernetes/jobs.py
@@ -540,7 +540,7 @@ class KubernetesJob(JobBlock):
_block_type_name = "Kubernetes Job"
_block_type_slug = "k8s-job"
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/2d0b896006ad463b49c28aaac14f31e00e32cfab-250x250.png" # noqa: E501
- _documentation_url = "https://prefecthq.github.io/prefect-kubernetes/jobs/#prefect_kubernetes.jobs.KubernetesJob" # noqa
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-kubernetes" # noqa
@sync_compatible
async def trigger(self):
diff --git a/src/integrations/prefect-kubernetes/prefect_kubernetes/settings.py b/src/integrations/prefect-kubernetes/prefect_kubernetes/settings.py
new file mode 100644
index 000000000000..4dbada798b57
--- /dev/null
+++ b/src/integrations/prefect-kubernetes/prefect_kubernetes/settings.py
@@ -0,0 +1,53 @@
+from typing import Optional
+
+from pydantic import AliasChoices, AliasPath, Field
+
+from prefect.settings.base import PrefectBaseSettings, _build_settings_config
+
+
+class KubernetesWorkerSettings(PrefectBaseSettings):
+ model_config = _build_settings_config(("integrations", "kubernetes", "worker"))
+
+ api_key_secret_name: Optional[str] = Field(
+ default=None,
+ description="The name of the secret the worker's API key is stored in.",
+ )
+
+ create_secret_for_api_key: bool = Field(
+ default=False,
+ description="If `True`, the worker will create a secret in the same namespace as created Kubernetes jobs to store the Prefect API key.",
+ validation_alias=AliasChoices(
+ AliasPath("create_secret_for_api_key"),
+ "prefect_integrations_kubernetes_worker_create_secret_for_api_key",
+ "prefect_kubernetes_worker_store_prefect_api_in_secret",
+ ),
+ )
+
+ add_tcp_keepalive: bool = Field(
+ default=True,
+ description="If `True`, the worker will add TCP keepalive to the Kubernetes client.",
+ validation_alias=AliasChoices(
+ AliasPath("add_tcp_keepalive"),
+ "prefect_integrations_kubernetes_worker_add_tcp_keepalive",
+ "prefect_kubernetes_worker_add_tcp_keepalive",
+ ),
+ )
+
+
+class KubernetesSettings(PrefectBaseSettings):
+ model_config = _build_settings_config(("integrations", "kubernetes"))
+
+ cluster_uid: Optional[str] = Field(
+ default=None,
+ description="A unique identifier for the current cluster being used.",
+ validation_alias=AliasChoices(
+ AliasPath("cluster_uid"),
+ "prefect_integrations_kubernetes_cluster_uid",
+ "prefect_kubernetes_cluster_uid",
+ ),
+ )
+
+ worker: KubernetesWorkerSettings = Field(
+ description="Settings for controlling Kubernetes worker behavior.",
+ default_factory=KubernetesWorkerSettings,
+ )
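A minimal usage sketch for the new settings module, assuming it is importable; values resolve from environment variables, a `.env` file, `prefect.toml`, or `pyproject.toml`, and the legacy `PREFECT_KUBERNETES_*` names remain accepted through the alias choices:

```python
import os

from prefect_kubernetes.settings import KubernetesSettings

os.environ["PREFECT_INTEGRATIONS_KUBERNETES_WORKER_ADD_TCP_KEEPALIVE"] = "false"

settings = KubernetesSettings()
assert settings.worker.add_tcp_keepalive is False
assert settings.worker.create_secret_for_api_key is False  # default
assert settings.cluster_uid is None  # default
```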
diff --git a/src/integrations/prefect-kubernetes/prefect_kubernetes/worker.py b/src/integrations/prefect-kubernetes/prefect_kubernetes/worker.py
index e51e66667fe8..0d3f58637d66 100644
--- a/src/integrations/prefect-kubernetes/prefect_kubernetes/worker.py
+++ b/src/integrations/prefect-kubernetes/prefect_kubernetes/worker.py
@@ -14,11 +14,11 @@
### Securing your Prefect Cloud API key
If you are using Prefect Cloud and would like to pass your Prefect Cloud API key to
created jobs via a Kubernetes secret, set the
-`PREFECT_KUBERNETES_WORKER_STORE_PREFECT_API_IN_SECRET` environment variable before
+`PREFECT_INTEGRATIONS_KUBERNETES_WORKER_CREATE_SECRET_FOR_API_KEY` environment variable before
starting your worker:
```bash
-export PREFECT_KUBERNETES_WORKER_STORE_PREFECT_API_IN_SECRET="true"
+export PREFECT_INTEGRATIONS_KUBERNETES_WORKER_CREATE_SECRET_FOR_API_KEY="true"
prefect worker start --pool 'my-work-pool' --type kubernetes
```
@@ -105,7 +105,6 @@
import enum
import json
import logging
-import os
import shlex
from contextlib import asynccontextmanager
from datetime import datetime
@@ -160,6 +159,7 @@
)
from prefect_kubernetes.credentials import KubernetesClusterConfig
from prefect_kubernetes.events import KubernetesEventsReplicator
+from prefect_kubernetes.settings import KubernetesSettings
from prefect_kubernetes.utilities import (
KeepAliveClientRequest,
_slugify_label_key,
@@ -566,7 +566,7 @@ class KubernetesWorker(BaseWorker):
"Kubernetes cluster."
)
_display_name = "Kubernetes"
- _documentation_url = "https://prefecthq.github.io/prefect-kubernetes/worker/"
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-kubernetes"
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/2d0b896006ad463b49c28aaac14f31e00e32cfab-250x250.png" # noqa
def __init__(self, *args, **kwargs):
@@ -650,6 +650,7 @@ async def _get_configured_kubernetes_client(
Returns a configured Kubernetes client.
"""
client = None
+ settings = KubernetesSettings()
if configuration.cluster_config:
config_dict = configuration.cluster_config.config
@@ -667,9 +668,7 @@ async def _get_configured_kubernetes_client(
# If in-cluster config fails, load the local kubeconfig
client = await config.new_client_from_config()
- if os.environ.get(
- "PREFECT_KUBERNETES_WORKER_ADD_TCP_KEEPALIVE", "TRUE"
- ).strip().lower() in ("true", "1"):
+ if settings.worker.add_tcp_keepalive:
client.rest_client.pool_manager._request_class = KeepAliveClientRequest
try:
@@ -678,7 +677,10 @@ async def _get_configured_kubernetes_client(
await client.close()
async def _replace_api_key_with_secret(
- self, configuration: KubernetesWorkerJobConfiguration, client: "ApiClient"
+ self,
+ configuration: KubernetesWorkerJobConfiguration,
+ client: "ApiClient",
+ secret_name: Optional[str] = None,
):
"""Replaces the PREFECT_API_KEY environment variable with a Kubernetes secret"""
manifest_env = configuration.job_manifest["spec"]["template"]["spec"][
@@ -693,7 +695,7 @@ async def _replace_api_key_with_secret(
{},
)
api_key = manifest_api_key_env.get("value")
- if api_key:
+ if api_key and not secret_name:
secret_name = f"prefect-{_slugify_name(self.name)}-api-key"
secret = await self._upsert_secret(
name=secret_name,
@@ -706,6 +708,7 @@ async def _replace_api_key_with_secret(
self._created_secrets[
(secret.metadata.name, secret.metadata.namespace)
] = configuration
+ if secret_name:
new_api_env_entry = {
"name": "PREFECT_API_KEY",
"valueFrom": {"secretKeyRef": {"name": secret_name, "key": "value"}},
@@ -733,9 +736,14 @@ async def _create_job(
"""
Creates a Kubernetes job from a job manifest.
"""
- if os.environ.get(
- "PREFECT_KUBERNETES_WORKER_STORE_PREFECT_API_IN_SECRET", ""
- ).strip().lower() in ("true", "1"):
+ settings = KubernetesSettings()
+ if settings.worker.api_key_secret_name:
+ await self._replace_api_key_with_secret(
+ configuration=configuration,
+ client=client,
+ secret_name=settings.worker.api_key_secret_name,
+ )
+ elif settings.worker.create_secret_for_api_key:
await self._replace_api_key_with_secret(
configuration=configuration, client=client
)
@@ -838,8 +846,9 @@ async def _get_cluster_uid(self, client: "ApiClient") -> str:
See https://github.com/kubernetes/kubernetes/issues/44954
"""
+ settings = KubernetesSettings()
# Default to an environment variable
- env_cluster_uid = os.environ.get("PREFECT_KUBERNETES_CLUSTER_UID")
+ env_cluster_uid = settings.cluster_uid
if env_cluster_uid:
return env_cluster_uid
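The resulting secret-handling order in `_create_job` is: a user-provided `api_key_secret_name` always wins and is never created or replaced by the worker; otherwise `create_secret_for_api_key` opts into a worker-managed secret. A simplified sketch of that precedence (the real method also upserts the secret and rewrites the job manifest):

```python
from typing import Optional, Tuple


def resolve_api_key_secret(settings) -> Tuple[Optional[str], bool]:
    """Return (existing_secret_name, create_managed_secret) for a new job."""
    if settings.worker.api_key_secret_name:
        # An existing secret always wins; the worker never creates or replaces it.
        return settings.worker.api_key_secret_name, False
    if settings.worker.create_secret_for_api_key:
        # Fall back to a worker-managed secret.
        return None, True
    return None, False
```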
diff --git a/src/integrations/prefect-kubernetes/pyproject.toml b/src/integrations/prefect-kubernetes/pyproject.toml
index 37245b5e0fec..427f4c839aea 100644
--- a/src/integrations/prefect-kubernetes/pyproject.toml
+++ b/src/integrations/prefect-kubernetes/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
[project]
dependencies = [
- "prefect>=3.0.0rc1",
+ "prefect>=3.1.0",
"kubernetes-asyncio>=29.0.0",
"tenacity>=8.2.3",
"exceptiongroup",
@@ -45,7 +45,7 @@ dev = [
"pillow",
"pre-commit",
"pytest-asyncio",
- "pytest",
+ "pytest >= 8.3",
"pytest-env",
"pytest-timeout",
"pytest-xdist",
@@ -79,6 +79,7 @@ fail_under = 80
show_missing = true
[tool.pytest.ini_options]
+asyncio_default_fixture_loop_scope = "session"
asyncio_mode = "auto"
env = ["PREFECT_TEST_MODE=1"]
timeout = 30
diff --git a/src/integrations/prefect-kubernetes/tests/test_settings.py b/src/integrations/prefect-kubernetes/tests/test_settings.py
new file mode 100644
index 000000000000..1f808d5c005a
--- /dev/null
+++ b/src/integrations/prefect-kubernetes/tests/test_settings.py
@@ -0,0 +1,111 @@
+import os
+
+import toml
+from prefect_kubernetes.settings import KubernetesSettings
+
+
+def test_set_values_via_environment_variables(monkeypatch):
+ monkeypatch.setenv(
+ "PREFECT_INTEGRATIONS_KUBERNETES_WORKER_API_KEY_SECRET_NAME", "test-secret"
+ )
+ monkeypatch.setenv(
+ "PREFECT_INTEGRATIONS_KUBERNETES_WORKER_CREATE_SECRET_FOR_API_KEY", "true"
+ )
+ monkeypatch.setenv(
+ "PREFECT_INTEGRATIONS_KUBERNETES_WORKER_ADD_TCP_KEEPALIVE", "false"
+ )
+ monkeypatch.setenv(
+ "PREFECT_INTEGRATIONS_KUBERNETES_CLUSTER_UID", "test-cluster-uid"
+ )
+
+ settings = KubernetesSettings()
+
+ assert settings.worker.api_key_secret_name == "test-secret"
+ assert settings.worker.create_secret_for_api_key is True
+ assert settings.worker.add_tcp_keepalive is False
+ assert settings.cluster_uid == "test-cluster-uid"
+
+
+def test_set_values_via_dot_env_file(tmp_path):
+ dot_env_path = tmp_path / ".env"
+ with open(dot_env_path, "w") as f:
+ f.write(
+ "PREFECT_INTEGRATIONS_KUBERNETES_WORKER_API_KEY_SECRET_NAME=test-secret\n"
+ "PREFECT_INTEGRATIONS_KUBERNETES_WORKER_CREATE_SECRET_FOR_API_KEY=true\n"
+ "PREFECT_INTEGRATIONS_KUBERNETES_WORKER_ADD_TCP_KEEPALIVE=false\n"
+ "PREFECT_INTEGRATIONS_KUBERNETES_CLUSTER_UID=test-cluster-uid\n"
+ )
+
+ original_dir = os.getcwd()
+ try:
+ os.chdir(tmp_path)
+ settings = KubernetesSettings()
+ finally:
+ os.chdir(original_dir)
+
+ assert settings.worker.api_key_secret_name == "test-secret"
+ assert settings.worker.create_secret_for_api_key is True
+ assert settings.worker.add_tcp_keepalive is False
+ assert settings.cluster_uid == "test-cluster-uid"
+
+
+def test_set_values_via_prefect_toml_file(tmp_path):
+ toml_path = tmp_path / "prefect.toml"
+ toml_data = {
+ "integrations": {
+ "kubernetes": {
+ "worker": {
+ "api_key_secret_name": "test-secret",
+ "create_secret_for_api_key": True,
+ "add_tcp_keepalive": False,
+ },
+ "cluster_uid": "test-cluster-uid",
+ },
+ },
+ }
+ toml_path.write_text(toml.dumps(toml_data))
+
+ original_dir = os.getcwd()
+ try:
+ os.chdir(tmp_path)
+ settings = KubernetesSettings()
+ finally:
+ os.chdir(original_dir)
+
+ assert settings.worker.api_key_secret_name == "test-secret"
+ assert settings.worker.create_secret_for_api_key is True
+ assert settings.worker.add_tcp_keepalive is False
+ assert settings.cluster_uid == "test-cluster-uid"
+
+
+def test_set_values_via_pyproject_toml_file(tmp_path):
+ pyproject_toml_path = tmp_path / "pyproject.toml"
+ pyproject_toml_data = {
+ "tool": {
+ "prefect": {
+ "integrations": {
+ "kubernetes": {
+ "cluster_uid": "test-cluster-uid",
+ "worker": {
+ "api_key_secret_name": "test-secret",
+ "create_secret_for_api_key": True,
+ "add_tcp_keepalive": False,
+ },
+ },
+ },
+ },
+ },
+ }
+ pyproject_toml_path.write_text(toml.dumps(pyproject_toml_data))
+
+ original_dir = os.getcwd()
+ try:
+ os.chdir(tmp_path)
+ settings = KubernetesSettings()
+ finally:
+ os.chdir(original_dir)
+
+ assert settings.worker.api_key_secret_name == "test-secret"
+ assert settings.worker.create_secret_for_api_key is True
+ assert settings.worker.add_tcp_keepalive is False
+ assert settings.cluster_uid == "test-cluster-uid"
diff --git a/src/integrations/prefect-kubernetes/tests/test_worker.py b/src/integrations/prefect-kubernetes/tests/test_worker.py
index 993841e0c23c..04deb4ce2c5a 100644
--- a/src/integrations/prefect-kubernetes/tests/test_worker.py
+++ b/src/integrations/prefect-kubernetes/tests/test_worker.py
@@ -188,7 +188,17 @@ async def mock_stream(*args, **kwargs):
@pytest.fixture
def enable_store_api_key_in_secret(monkeypatch):
- monkeypatch.setenv("PREFECT_KUBERNETES_WORKER_STORE_PREFECT_API_IN_SECRET", "true")
+ monkeypatch.setenv(
+ "PREFECT_INTEGRATIONS_KUBERNETES_WORKER_CREATE_SECRET_FOR_API_KEY", "true"
+ )
+
+
+@pytest.fixture
+def mock_api_key_secret_name(monkeypatch):
+ monkeypatch.setenv(
+ "PREFECT_INTEGRATIONS_KUBERNETES_WORKER_API_KEY_SECRET_NAME", "test-secret"
+ )
+ return "test-secret"
from_template_and_values_cases = [
@@ -1609,6 +1619,104 @@ async def test_store_api_key_in_existing_secret(
),
)
+ async def test_use_existing_secret_name(
+ self,
+ flow_run,
+ mock_core_client,
+ mock_watch,
+ mock_pods_stream_that_returns_running_pod,
+ mock_batch_client,
+ mock_api_key_secret_name,
+ ):
+ mock_watch.return_value.stream = mock_pods_stream_that_returns_running_pod
+
+ configuration = await KubernetesWorkerJobConfiguration.from_template_and_values(
+ KubernetesWorker.get_default_base_job_template(), {"image": "foo"}
+ )
+ with temporary_settings(updates={PREFECT_API_KEY: "fake"}):
+ async with KubernetesWorker(work_pool_name="test") as k8s_worker:
+ mock_core_client.return_value.read_namespaced_secret.return_value = (
+ V1Secret(
+ api_version="v1",
+ kind="Secret",
+ metadata=V1ObjectMeta(
+ name=f"prefect-{_slugify_name(k8s_worker.name)}-api-key",
+ namespace=configuration.namespace,
+ ),
+ data={
+ "value": base64.b64encode("fake".encode("utf-8")).decode(
+ "utf-8"
+ )
+ },
+ )
+ )
+
+ configuration.prepare_for_flow_run(flow_run=flow_run)
+ await k8s_worker.run(flow_run, configuration)
+ mock_batch_client.return_value.create_namespaced_job.assert_called_once()
+ env = mock_batch_client.return_value.create_namespaced_job.call_args[0][
+ 1
+ ]["spec"]["template"]["spec"]["containers"][0]["env"]
+ assert {
+ "name": "PREFECT_API_KEY",
+ "valueFrom": {
+ "secretKeyRef": {
+ "name": mock_api_key_secret_name,
+ "key": "value",
+ }
+ },
+ } in env
+
+ async def test_existing_secret_name_takes_precedence(
+ self,
+ flow_run,
+ mock_core_client,
+ mock_watch,
+ mock_pods_stream_that_returns_running_pod,
+ mock_batch_client,
+ mock_api_key_secret_name,
+ enable_store_api_key_in_secret,
+ ):
+ mock_watch.return_value.stream = mock_pods_stream_that_returns_running_pod
+
+ configuration = await KubernetesWorkerJobConfiguration.from_template_and_values(
+ KubernetesWorker.get_default_base_job_template(), {"image": "foo"}
+ )
+ with temporary_settings(updates={PREFECT_API_KEY: "fake"}):
+ async with KubernetesWorker(work_pool_name="test") as k8s_worker:
+ mock_core_client.return_value.read_namespaced_secret.return_value = (
+ V1Secret(
+ api_version="v1",
+ kind="Secret",
+ metadata=V1ObjectMeta(
+ name=f"prefect-{_slugify_name(k8s_worker.name)}-api-key",
+ namespace=configuration.namespace,
+ ),
+ data={
+ "value": base64.b64encode("fake".encode("utf-8")).decode(
+ "utf-8"
+ )
+ },
+ )
+ )
+
+ configuration.prepare_for_flow_run(flow_run=flow_run)
+ await k8s_worker.run(flow_run, configuration)
+ mock_batch_client.return_value.create_namespaced_job.assert_called_once()
+ env = mock_batch_client.return_value.create_namespaced_job.call_args[0][
+ 1
+ ]["spec"]["template"]["spec"]["containers"][0]["env"]
+ assert {
+ "name": "PREFECT_API_KEY",
+ "valueFrom": {
+ "secretKeyRef": {
+ "name": mock_api_key_secret_name,
+ "key": "value",
+ }
+ },
+ } in env
+ mock_core_client.return_value.replace_namespaced_secret.assert_not_called()
+
async def test_create_job_failure(
self,
flow_run,
diff --git a/src/integrations/prefect-ray/pyproject.toml b/src/integrations/prefect-ray/pyproject.toml
index f6cc61d94e93..1205e6ea1f99 100644
--- a/src/integrations/prefect-ray/pyproject.toml
+++ b/src/integrations/prefect-ray/pyproject.toml
@@ -22,10 +22,7 @@ classifiers = [
"Programming Language :: Python :: 3.12",
"Topic :: Software Development :: Libraries",
]
-dependencies = [
- "prefect>=3.0.0rc1",
- "ray[default]>=2.0.0",
-]
+dependencies = ["prefect>=3.0.0", "ray[default]>=2.0.0"]
dynamic = ["version"]
[project.optional-dependencies]
@@ -41,7 +38,7 @@ dev = [
"pillow",
"pre-commit",
"pytest-asyncio",
- "pytest",
+ "pytest >= 8.3",
"pytest-asyncio>=0.18.2,!=0.22.0,<0.23.0",
"pytest-env",
"pytest-xdist",
@@ -75,7 +72,6 @@ fail_under = 80
show_missing = true
[tool.pytest.ini_options]
+asyncio_default_fixture_loop_scope = "session"
asyncio_mode = "auto"
-env = [
- "PREFECT_TEST_MODE=1",
-]
+env = ["PREFECT_TEST_MODE=1"]
diff --git a/src/integrations/prefect-ray/tests/conftest.py b/src/integrations/prefect-ray/tests/conftest.py
index fe650e4b8b90..3723fce5f3d5 100644
--- a/src/integrations/prefect-ray/tests/conftest.py
+++ b/src/integrations/prefect-ray/tests/conftest.py
@@ -1,6 +1,5 @@
import pytest
-from prefect.settings import PREFECT_ASYNC_FETCH_STATE_RESULT, temporary_settings
from prefect.testing.utilities import prefect_test_harness
@@ -8,9 +7,3 @@
def prefect_db():
with prefect_test_harness():
yield
-
-
-@pytest.fixture(autouse=True)
-def fetch_state_result():
- with temporary_settings(updates={PREFECT_ASYNC_FETCH_STATE_RESULT: True}):
- yield
diff --git a/src/integrations/prefect-redis/prefect_redis/blocks.py b/src/integrations/prefect-redis/prefect_redis/blocks.py
index 400cf235b91c..05f390d8de02 100644
--- a/src/integrations/prefect-redis/prefect_redis/blocks.py
+++ b/src/integrations/prefect-redis/prefect_redis/blocks.py
@@ -77,7 +77,7 @@ async def read_path(self, path: str) -> bytes:
Returns:
Contents at key as bytes
"""
- client = self.get_client()
+ client = self.get_async_client()
ret = await client.get(path)
await client.close()
@@ -90,7 +90,7 @@ async def write_path(self, path: str, content: bytes) -> None:
path: Redis key to write to
content: Binary object to write
"""
- client = self.get_client()
+ client = self.get_async_client()
ret = await client.set(path, content)
await client.close()
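The `get_async_client()` fix matters because `read_path`/`write_path` await the client's methods, and only `redis.asyncio`'s client returns awaitables there. A short sketch of the corrected pattern, assuming a reachable Redis:

```python
from redis.asyncio import Redis


async def roundtrip() -> bytes:
    client = Redis()  # illustrative; real code gets this from the block
    await client.set("path", b"content")
    value = await client.get("path")
    await client.close()
    return value
```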
diff --git a/src/integrations/prefect-redis/prefect_redis/locking.py b/src/integrations/prefect-redis/prefect_redis/locking.py
index ce3ed377d2ba..47429fbddff1 100644
--- a/src/integrations/prefect-redis/prefect_redis/locking.py
+++ b/src/integrations/prefect-redis/prefect_redis/locking.py
@@ -148,7 +148,7 @@ async def await_for_lock(self, key: str, timeout: Optional[float] = None) -> boo
lock = AsyncLock(self.async_client, lock_name)
lock_freed = await lock.acquire(blocking_timeout=timeout)
if lock_freed:
- lock.release()
+ await lock.release()
return lock_freed
def is_locked(self, key: str) -> bool:
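The small-looking change above fixes a real leak: `redis.asyncio.lock.Lock.release()` is a coroutine, so calling it without `await` produced a never-awaited coroutine and left the lock held until its TTL expired. A sketch of the corrected acquire/release pair, again assuming a reachable Redis:

```python
from redis.asyncio import Redis
from redis.asyncio.lock import Lock


async def acquire_and_release() -> bool:
    client = Redis()
    lock = Lock(client, "my-lock")
    acquired = await lock.acquire(blocking_timeout=5)
    if acquired:
        await lock.release()  # without `await`, the lock would stay held
    return acquired
```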
diff --git a/src/integrations/prefect-redis/pyproject.toml b/src/integrations/prefect-redis/pyproject.toml
index 682e47025971..6fbc6e2016a6 100644
--- a/src/integrations/prefect-redis/pyproject.toml
+++ b/src/integrations/prefect-redis/pyproject.toml
@@ -22,7 +22,7 @@ classifiers = [
"Programming Language :: Python :: 3.12",
"Topic :: Software Development :: Libraries",
]
-dependencies = ["prefect>=3.0.0rc1", "redis>=5.0.1"]
+dependencies = ["prefect>=3.0.0", "redis>=5.0.1"]
dynamic = ["version"]
[project.optional-dependencies]
@@ -37,7 +37,7 @@ dev = [
"pillow",
"pre-commit",
"pytest-asyncio",
- "pytest",
+ "pytest >= 8.3",
"pytest-env",
"pytest-timeout",
"pytest-xdist",
@@ -71,8 +71,7 @@ fail_under = 80
show_missing = true
[tool.pytest.ini_options]
+asyncio_default_fixture_loop_scope = "session"
asyncio_mode = "auto"
timeout = "30"
-env = [
- "PREFECT_TEST_MODE=1",
-]
+env = ["PREFECT_TEST_MODE=1"]
diff --git a/src/integrations/prefect-shell/prefect_shell/commands.py b/src/integrations/prefect-shell/prefect_shell/commands.py
index 40395586f422..50360595730b 100644
--- a/src/integrations/prefect-shell/prefect_shell/commands.py
+++ b/src/integrations/prefect-shell/prefect_shell/commands.py
@@ -227,7 +227,7 @@ class ShellOperation(JobBlock):
_block_type_name = "Shell Operation"
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/0b47a017e1b40381de770c17647c49cdf6388d1c-250x250.png" # noqa: E501
- _documentation_url = "https://prefecthq.github.io/prefect-shell/commands/#prefect_shell.commands.ShellOperation" # noqa: E501
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-shell" # noqa
commands: List[str] = Field(
default=..., description="A list of commands to execute sequentially."
diff --git a/src/integrations/prefect-shell/pyproject.toml b/src/integrations/prefect-shell/pyproject.toml
index bd2ae207af8a..7a0a64af6307 100644
--- a/src/integrations/prefect-shell/pyproject.toml
+++ b/src/integrations/prefect-shell/pyproject.toml
@@ -22,7 +22,7 @@ classifiers = [
"Programming Language :: Python :: 3.12",
"Topic :: Software Development :: Libraries",
]
-dependencies = ["prefect>=3.0.0rc1"]
+dependencies = ["prefect>=3.0.0"]
dynamic = ["version"]
[project.optional-dependencies]
@@ -38,7 +38,7 @@ dev = [
"pillow",
"pre-commit",
"pytest-asyncio",
- "pytest",
+ "pytest >= 8.3",
"pytest-env",
"pytest-xdist",
]
@@ -54,7 +54,15 @@ version_file = "prefect_shell/_version.py"
root = "../../.."
tag_regex = "^prefect-shell-(?P\\d+\\.\\d+\\.\\d+(?:[a-zA-Z0-9]+(?:\\.[a-zA-Z0-9]+)*)?)$"
fallback_version = "0.2.0"
-git_describe_command = 'git describe --dirty --tags --long --match "prefect-shell-*[0-9]*"'
+git_describe_command = [
+ "git",
+ "describe",
+ "--dirty",
+ "--tags",
+ "--long",
+ "--match",
+ "prefect-shell-*[0-9]*",
+]
[tool.interrogate]
ignore-init-module = true
@@ -72,6 +80,4 @@ show_missing = true
[tool.pytest.ini_options]
asyncio_mode = "auto"
-env = [
- "PREFECT_TEST_MODE=1",
-]
+env = ["PREFECT_TEST_MODE=1"]
diff --git a/src/integrations/prefect-shell/tests/test_commands.py b/src/integrations/prefect-shell/tests/test_commands.py
index 738af1082a80..af541c3f17b6 100644
--- a/src/integrations/prefect-shell/tests/test_commands.py
+++ b/src/integrations/prefect-shell/tests/test_commands.py
@@ -156,23 +156,37 @@ async def test_error(self, method):
@pytest.mark.skipif(sys.version >= "3.12", reason="Fails on Python 3.12")
@pytest.mark.parametrize("method", ["run", "trigger"])
async def test_output(self, prefect_task_runs_caplog, method):
+ # Set the log level to INFO explicitly
+ prefect_task_runs_caplog.set_level(logging.INFO)
+
op = ShellOperation(commands=["echo 'testing\nthe output'", "echo good"])
assert await self.execute(op, method) == ["testing", "the output", "good"]
- records = prefect_task_runs_caplog.records
- assert len(records) == 3
- assert "triggered with 2 commands running" in records[0].message
- assert "stream output:\ntesting\nthe output\ngood" in records[1].message
- assert "completed with return code 0" in records[2].message
+
+ # Filter for only INFO level records
+ log_messages = [
+ r.message
+ for r in prefect_task_runs_caplog.records
+ if r.levelno >= logging.INFO
+ ]
+ assert any("triggered with 2 commands running" in m for m in log_messages)
+ assert any(
+ "stream output:\ntesting\nthe output\ngood" in m for m in log_messages
+ )
+ assert any("completed with return code 0" in m for m in log_messages)
@pytest.mark.parametrize("method", ["run", "trigger"])
async def test_stream_output(self, prefect_task_runs_caplog, method):
# If stream_output is False, there should be output,
# but no logs from the shell process
+ prefect_task_runs_caplog.set_level(logging.INFO) # Capture INFO-level logs and above
+
op = ShellOperation(
commands=["echo 'testing\nthe output'", "echo good"], stream_output=False
)
assert await self.execute(op, method) == ["testing", "the output", "good"]
- records = prefect_task_runs_caplog.records
+ records = [
+ r for r in prefect_task_runs_caplog.records if r.levelno >= logging.INFO
+ ]
assert len(records) == 2
assert "triggered with 2 commands running" in records[0].message
assert "completed with return code 0" in records[1].message
diff --git a/src/integrations/prefect-slack/prefect_slack/credentials.py b/src/integrations/prefect-slack/prefect_slack/credentials.py
index 1b02e913c4e2..4a8b14e40a0d 100644
--- a/src/integrations/prefect-slack/prefect_slack/credentials.py
+++ b/src/integrations/prefect-slack/prefect_slack/credentials.py
@@ -1,14 +1,15 @@
"""Credential classes use to store Slack credentials."""
-from typing import Optional
+from typing import Any, Optional, Union
from pydantic import Field, SecretStr
from slack_sdk.web.async_client import AsyncWebClient
from slack_sdk.webhook.async_client import AsyncWebhookClient
+from slack_sdk.webhook.client import WebhookClient
+from prefect._internal.compatibility.async_dispatch import async_dispatch
from prefect.blocks.core import Block
from prefect.blocks.notifications import NotificationBlock
-from prefect.utilities.asyncutils import sync_compatible
class SlackCredentials(Block):
@@ -35,7 +36,7 @@ class SlackCredentials(Block):
_block_type_name = "Slack Credentials"
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/c1965ecbf8704ee1ea20d77786de9a41ce1087d1-500x500.png" # noqa
- _documentation_url = "https://prefecthq.github.io/prefect-slack/credentials/#prefect_slack.credentials.SlackCredentials" # noqa
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-slack" # noqa
token: SecretStr = Field(
default=...,
@@ -81,7 +82,7 @@ class SlackWebhook(NotificationBlock):
_block_type_name = "Slack Incoming Webhook"
_logo_url = "https://images.ctfassets.net/gm98wzqotmnx/7dkzINU9r6j44giEFuHuUC/85d4cd321ad60c1b1e898bc3fbd28580/5cb480cd5f1b6d3fbadece79.png?h=250" # noqa
- _documentation_url = "https://prefecthq.github.io/prefect-slack/credentials/#prefect_slack.credentials.SlackWebhook" # noqa
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-slack" # noqa
url: SecretStr = Field(
default=...,
@@ -90,22 +91,18 @@ class SlackWebhook(NotificationBlock):
examples=["https://hooks.slack.com/XXX"],
)
- def get_client(self) -> AsyncWebhookClient:
+ def get_client(
+ self, sync_client: bool = False
+ ) -> Union[AsyncWebhookClient, WebhookClient]:
"""
- Returns an authenticated `AsyncWebhookClient` to interact with the configured
- Slack webhook.
+ Returns an authenticated client for the configured Slack webhook: a
+ synchronous `WebhookClient` when `sync_client=True`, otherwise an
+ `AsyncWebhookClient`.
"""
+ if sync_client:
+ return WebhookClient(url=self.url.get_secret_value())
return AsyncWebhookClient(url=self.url.get_secret_value())
- @sync_compatible
- async def notify(self, body: str, subject: Optional[str] = None):
- """
- Sends a message to the Slack channel.
- """
- client = self.get_client()
-
- response = await client.send(text=body)
-
+ def _raise_on_failure(self, response: Any):
# prefect>=2.17.2 added a means for notification blocks to raise errors on
# failures. This is not available in older versions, so we need to check if the
# private base class attribute exists before using it.
@@ -117,3 +114,20 @@ async def notify(self, body: str, subject: Optional[str] = None):
if response.status_code >= 400:
raise NotificationError(f"Failed to send message: {response.body}")
+
+ async def notify_async(self, body: str, subject: Optional[str] = None):
+ """
+ Sends a message to the Slack channel asynchronously.
+ """
+ client = self.get_client()
+ response = await client.send(text=body)
+ self._raise_on_failure(response)
+
+ @async_dispatch(notify_async)
+ def notify(self, body: str, subject: Optional[str] = None):
+ """
+ Sends a message to the Slack channel.
+ """
+ client = self.get_client(sync_client=True)
+ response = client.send(text=body)
+ self._raise_on_failure(response)
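With `notify` now wrapped by `async_dispatch`, one method name serves both call sites: a plain call from sync code uses the `WebhookClient` path, and the same call awaited inside a coroutine dispatches to `notify_async`. A usage sketch (placeholder webhook URL; nothing is sent unless the functions are invoked):

```python
from prefect_slack import SlackWebhook

webhook = SlackWebhook(url="https://hooks.slack.com/XXX")


def notify_from_sync_code() -> None:
    webhook.notify("ran from sync code")  # sync WebhookClient path


async def notify_from_async_code() -> None:
    await webhook.notify("ran from a coroutine")  # dispatches to notify_async
```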
diff --git a/src/integrations/prefect-slack/pyproject.toml b/src/integrations/prefect-slack/pyproject.toml
index f4f71ddca3bf..771253f76438 100644
--- a/src/integrations/prefect-slack/pyproject.toml
+++ b/src/integrations/prefect-slack/pyproject.toml
@@ -22,11 +22,7 @@ classifiers = [
"Programming Language :: Python :: 3.12",
"Topic :: Software Development :: Libraries",
]
-dependencies = [
- "aiohttp",
- "slack_sdk>=3.15.1",
- "prefect>=3.0.0rc1",
-]
+dependencies = ["aiohttp", "slack_sdk>=3.15.1", "prefect>=3.0.0"]
dynamic = ["version"]
[project.optional-dependencies]
@@ -41,7 +37,7 @@ dev = [
"pillow",
"pre-commit",
"pytest-asyncio",
- "pytest",
+ "pytest >= 8.3",
"pytest-env",
"pytest-xdist",
]
@@ -74,7 +70,6 @@ fail_under = 80
show_missing = true
[tool.pytest.ini_options]
+asyncio_default_fixture_loop_scope = "session"
asyncio_mode = "auto"
-env = [
- "PREFECT_TEST_MODE=1",
-]
+env = ["PREFECT_TEST_MODE=1"]
diff --git a/src/integrations/prefect-slack/tests/test_credentials.py b/src/integrations/prefect-slack/tests/test_credentials.py
index 37fa3dd335ad..e54aae52a291 100644
--- a/src/integrations/prefect-slack/tests/test_credentials.py
+++ b/src/integrations/prefect-slack/tests/test_credentials.py
@@ -1,9 +1,10 @@
-from unittest.mock import AsyncMock
+from unittest.mock import AsyncMock, MagicMock
import pytest
from prefect_slack import SlackCredentials, SlackWebhook
from slack_sdk.web.async_client import AsyncWebClient
-from slack_sdk.webhook.async_client import AsyncWebhookClient, WebhookResponse
+from slack_sdk.webhook.async_client import AsyncWebhookClient
+from slack_sdk.webhook.webhook_response import WebhookResponse
def test_slack_credentials():
@@ -63,3 +64,52 @@ async def test_slack_webhook_block_handles_raise_on_failure(
with pytest.raises(NotificationError, match="Failed to send message: woops"):
with block.raise_on_failure():
await block.notify("hello", "world")
+
+
+def test_slack_webhook_sync_notify(monkeypatch):
+ """Test the sync notify path"""
+ mock_client = MagicMock()
+ mock_client.send.return_value = WebhookResponse(
+ url="http://test", status_code=200, body="ok", headers={}
+ )
+
+ webhook = SlackWebhook(url="http://test")
+ monkeypatch.setattr(webhook, "get_client", MagicMock(return_value=mock_client))
+
+ webhook.notify("test message")
+ mock_client.send.assert_called_once_with(text="test message")
+
+
+async def test_slack_webhook_async_notify(monkeypatch):
+ """Test the async notify path"""
+ mock_client = MagicMock()
+ mock_client.send = AsyncMock(
+ return_value=WebhookResponse(
+ url="http://test", status_code=200, body="ok", headers={}
+ )
+ )
+
+ webhook = SlackWebhook(url="http://test")
+ monkeypatch.setattr(webhook, "get_client", MagicMock(return_value=mock_client))
+
+ await webhook.notify_async("test message")
+ mock_client.send.assert_called_once_with(text="test message")
+
+
+@pytest.mark.parametrize("message", ["test message 1", "test message 2"])
+async def test_slack_webhook_notify_async_dispatch(monkeypatch, message):
+ """Test that async_dispatch properly handles both sync and async contexts"""
+
+ mock_response = WebhookResponse(
+ url="http://test", status_code=200, body="ok", headers={}
+ )
+
+ mock_client = MagicMock()
+ mock_client.send = AsyncMock(return_value=mock_response)
+
+ webhook = SlackWebhook(url="http://test")
+ monkeypatch.setattr(webhook, "get_client", lambda sync_client=False: mock_client)
+
+ # Test notification
+ await webhook.notify(message)
+ mock_client.send.assert_called_once_with(text=message)
diff --git a/src/integrations/prefect-snowflake/prefect_snowflake/credentials.py b/src/integrations/prefect-snowflake/prefect_snowflake/credentials.py
index 7c9012164358..1ea50ae97f83 100644
--- a/src/integrations/prefect-snowflake/prefect_snowflake/credentials.py
+++ b/src/integrations/prefect-snowflake/prefect_snowflake/credentials.py
@@ -72,7 +72,7 @@ class SnowflakeCredentials(CredentialsBlock):
_block_type_name = "Snowflake Credentials"
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/bd359de0b4be76c2254bd329fe3a267a1a3879c2-250x250.png" # noqa
- _documentation_url = "https://prefecthq.github.io/prefect-snowflake/credentials/#prefect_snowflake.credentials.SnowflakeCredentials" # noqa
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-snowflake" # noqa
account: str = Field(
...,
diff --git a/src/integrations/prefect-snowflake/prefect_snowflake/database.py b/src/integrations/prefect-snowflake/prefect_snowflake/database.py
index 0b95dc890d35..0ca83b5733b6 100644
--- a/src/integrations/prefect-snowflake/prefect_snowflake/database.py
+++ b/src/integrations/prefect-snowflake/prefect_snowflake/database.py
@@ -75,7 +75,7 @@ class SnowflakeConnector(DatabaseBlock):
_block_type_name = "Snowflake Connector"
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/bd359de0b4be76c2254bd329fe3a267a1a3879c2-250x250.png" # noqa
- _documentation_url = "https://prefecthq.github.io/prefect-snowflake/database/#prefect_snowflake.database.SnowflakeConnector" # noqa
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-snowflake" # noqa
_description = "Perform data operations against a Snowflake database."
credentials: SnowflakeCredentials = Field(
diff --git a/src/integrations/prefect-snowflake/pyproject.toml b/src/integrations/prefect-snowflake/pyproject.toml
index 5b78d1b71cb4..c57d27b28729 100644
--- a/src/integrations/prefect-snowflake/pyproject.toml
+++ b/src/integrations/prefect-snowflake/pyproject.toml
@@ -4,10 +4,7 @@ build-backend = "setuptools.build_meta"
[project]
name = "prefect-snowflake"
-dependencies = [
- "snowflake-connector-python>=2.7.6",
- "prefect>=3.0.0rc1",
-]
+dependencies = ["snowflake-connector-python>=2.7.6", "prefect>=3.0.0"]
dynamic = ["version"]
description = "Prefect integrations for interacting with Snowflake"
readme = "README.md"
@@ -40,7 +37,7 @@ dev = [
"pillow",
"pre-commit",
"pytest-asyncio",
- "pytest",
+ "pytest >= 8.3",
"pytest-env",
"pytest-xdist",
]
@@ -73,7 +70,6 @@ fail_under = 80
show_missing = true
[tool.pytest.ini_options]
+asyncio_default_fixture_loop_scope = "session"
asyncio_mode = "auto"
-env = [
- "PREFECT_TEST_MODE=1",
-]
+env = ["PREFECT_TEST_MODE=1"]
diff --git a/src/integrations/prefect-sqlalchemy/prefect_sqlalchemy/database.py b/src/integrations/prefect-sqlalchemy/prefect_sqlalchemy/database.py
index 1b340d14d336..cc8db9ec0618 100644
--- a/src/integrations/prefect-sqlalchemy/prefect_sqlalchemy/database.py
+++ b/src/integrations/prefect-sqlalchemy/prefect_sqlalchemy/database.py
@@ -94,7 +94,7 @@ class SqlAlchemyConnector(CredentialsBlock, DatabaseBlock):
_block_type_name = "SQLAlchemy Connector"
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/3c7dff04f70aaf4528e184a3b028f9e40b98d68c-250x250.png" # type: ignore
- _documentation_url = "https://prefecthq.github.io/prefect-sqlalchemy/database/#prefect_sqlalchemy.database.SqlAlchemyConnector" # type: ignore
+ _documentation_url = "https://docs.prefect.io/integrations/prefect-sqlalchemy" # type: ignore
model_config = ConfigDict(arbitrary_types_allowed=True)
connection_info: Union[ConnectionComponents, DBUrl] = Field(
diff --git a/src/integrations/prefect-sqlalchemy/pyproject.toml b/src/integrations/prefect-sqlalchemy/pyproject.toml
index edb05df79d97..db97fa2592bc 100644
--- a/src/integrations/prefect-sqlalchemy/pyproject.toml
+++ b/src/integrations/prefect-sqlalchemy/pyproject.toml
@@ -4,10 +4,7 @@ build-backend = "setuptools.build_meta"
[project]
name = "prefect-sqlalchemy"
-dependencies = [
- "sqlalchemy>=1.4.31,<3",
- "prefect>=3.0.0rc1",
-]
+dependencies = ["sqlalchemy>=1.4.31,<3", "prefect>=3.0.0"]
dynamic = ["version"]
description = "Prefect integrations for working with databases"
readme = "README.md"
@@ -42,7 +39,7 @@ dev = [
"pillow",
"pre-commit",
"psycopg2",
- "pytest",
+ "pytest >= 8.3",
"pytest-asyncio",
"pytest-env",
"pytest-xdist",
@@ -76,7 +73,6 @@ fail_under = 80
show_missing = true
[tool.pytest.ini_options]
+asyncio_default_fixture_loop_scope = "session"
asyncio_mode = "auto"
-env = [
- "PREFECT_TEST_MODE=1",
-]
+env = ["PREFECT_TEST_MODE=1"]
diff --git a/src/integrations/prefect-sqlalchemy/tests/conftest.py b/src/integrations/prefect-sqlalchemy/tests/conftest.py
index 70ecf5f1c256..a06c90e85232 100644
--- a/src/integrations/prefect-sqlalchemy/tests/conftest.py
+++ b/src/integrations/prefect-sqlalchemy/tests/conftest.py
@@ -1,6 +1,5 @@
import pytest
-from prefect.settings import PREFECT_ASYNC_FETCH_STATE_RESULT, temporary_settings
from prefect.testing.utilities import prefect_test_harness
@@ -11,9 +10,3 @@ def prefect_db():
"""
with prefect_test_harness():
yield
-
-
-@pytest.fixture(autouse=True)
-def fetch_state_result():
- with temporary_settings(updates={PREFECT_ASYNC_FETCH_STATE_RESULT: True}):
- yield
diff --git a/src/prefect/_internal/compatibility/async_dispatch.py b/src/prefect/_internal/compatibility/async_dispatch.py
new file mode 100644
index 000000000000..12422d696d0e
--- /dev/null
+++ b/src/prefect/_internal/compatibility/async_dispatch.py
@@ -0,0 +1,63 @@
+import asyncio
+import inspect
+from functools import wraps
+from typing import Any, Callable, Coroutine, Optional, TypeVar, Union
+
+from typing_extensions import ParamSpec
+
+from prefect.tasks import Task
+
+R = TypeVar("R")
+P = ParamSpec("P")
+
+
+def is_in_async_context() -> bool:
+ """
+ Returns True if called from within an async context (coroutine or running event loop)
+ """
+ try:
+ asyncio.get_running_loop()
+ return True
+ except RuntimeError:
+ return False
+
+
+def _is_acceptable_callable(obj: Union[Callable, Task]) -> bool:
+ if inspect.iscoroutinefunction(obj):
+ return True
+ if isinstance(obj, Task) and inspect.iscoroutinefunction(obj.fn):
+ return True
+ return False
+
+
+def async_dispatch(
+ async_impl: Callable[P, Coroutine[Any, Any, R]],
+) -> Callable[[Callable[P, R]], Callable[P, Union[R, Coroutine[Any, Any, R]]]]:
+ """
+ Decorator that dispatches to either sync or async implementation based on context.
+
+ Args:
+ async_impl: The async implementation to dispatch to when in async context
+ """
+
+ def decorator(
+ sync_fn: Callable[P, R],
+ ) -> Callable[P, Union[R, Coroutine[Any, Any, R]]]:
+ if not _is_acceptable_callable(async_impl):
+ raise TypeError("async_impl must be an async function")
+
+ @wraps(sync_fn)
+ def wrapper(
+ *args: P.args,
+ _sync: Optional[bool] = None, # type: ignore
+ **kwargs: P.kwargs,
+ ) -> Union[R, Coroutine[Any, Any, R]]:
+ should_run_sync = _sync if _sync is not None else not is_in_async_context()
+
+ if should_run_sync:
+ return sync_fn(*args, **kwargs)
+ return async_impl(*args, **kwargs)
+
+ return wrapper # type: ignore
+
+ return decorator
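A minimal usage sketch of the decorator defined above; the function names are illustrative. Dispatch is decided at call time: no running event loop selects the sync implementation, a running loop selects the async one, and `_sync` overrides both:

```python
import asyncio

from prefect._internal.compatibility.async_dispatch import async_dispatch


async def fetch_data_async() -> str:
    return "async result"


@async_dispatch(fetch_data_async)
def fetch_data() -> str:
    return "sync result"


async def main() -> str:
    return await fetch_data()  # called under a running loop: async path


assert fetch_data() == "sync result"  # no running loop: sync path
assert asyncio.run(main()) == "async result"
assert asyncio.run(fetch_data(_sync=False)) == "async result"  # forced async
```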
diff --git a/src/prefect/_internal/schemas/bases.py b/src/prefect/_internal/schemas/bases.py
index 1c41ad033e0d..01bf0e1ab0d5 100644
--- a/src/prefect/_internal/schemas/bases.py
+++ b/src/prefect/_internal/schemas/bases.py
@@ -84,9 +84,12 @@ def reset_fields(self: Self) -> Self:
Returns:
PrefectBaseModel: A new instance of the model with the reset fields.
"""
+ data = self.model_dump()
return self.model_copy(
update={
- field: self.model_fields[field].get_default(call_default_factory=True)
+ field: self.model_fields[field].get_default(
+ call_default_factory=True, validated_data=data
+ )
for field in self._reset_fields
}
)
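Context for the `reset_fields` change: pydantic >= 2.10 lets a `default_factory` receive the already-validated data, so `get_default` must be handed that data when defaults are recomputed. A tiny illustrative model, assuming pydantic >= 2.10:

```python
from pydantic import BaseModel, Field


class Run(BaseModel):
    name: str
    # The factory receives validated data, so the default can depend on
    # other fields.
    slug: str = Field(default_factory=lambda data: data["name"].lower())


assert Run(name="Demo").slug == "demo"
```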
diff --git a/src/prefect/blocks/notifications.py b/src/prefect/blocks/notifications.py
index df8c6402b12a..0c3e6ab86bfe 100644
--- a/src/prefect/blocks/notifications.py
+++ b/src/prefect/blocks/notifications.py
@@ -494,6 +494,7 @@ def block_initialization(self) -> None:
entity=self.entity,
batch=self.batch,
tags=self.tags,
+ action="new",
).url()
)
self._start_apprise_client(url)
diff --git a/src/prefect/cache_policies.py b/src/prefect/cache_policies.py
index e4caa861c415..50717e5ceaea 100644
--- a/src/prefect/cache_policies.py
+++ b/src/prefect/cache_policies.py
@@ -7,6 +7,7 @@
from typing_extensions import Self
from prefect.context import TaskRunContext
+from prefect.exceptions import HashError
from prefect.utilities.hashing import hash_objects
if TYPE_CHECKING:
@@ -175,7 +176,7 @@ def compute_key(
keys.append(policy_key)
if not keys:
return None
- return hash_objects(*keys)
+ return hash_objects(*keys, raise_on_failure=True)
@dataclass
@@ -223,8 +224,7 @@ def compute_key(
lines = task_ctx.task.fn.__code__.co_code
else:
raise
-
- return hash_objects(lines)
+ return hash_objects(lines, raise_on_failure=True)
@dataclass
@@ -242,7 +242,7 @@ def compute_key(
) -> Optional[str]:
if not flow_parameters:
return None
- return hash_objects(flow_parameters)
+ return hash_objects(flow_parameters, raise_on_failure=True)
@dataclass
@@ -293,7 +293,18 @@ def compute_key(
if key not in exclude:
hashed_inputs[key] = val
- return hash_objects(hashed_inputs)
+ try:
+ return hash_objects(hashed_inputs, raise_on_failure=True)
+ except HashError as exc:
+ msg = (
+ f"{exc}\n\n"
+ "This often occurs when task inputs contain objects that cannot be cached "
+ "like locks, file handles, or other system resources.\n\n"
+ "To resolve this, you can:\n"
+ " 1. Exclude these arguments by defining a custom `cache_key_fn`\n"
+ " 2. Disable caching by passing `cache_policy=NONE`\n"
+ )
+ raise ValueError(msg) from exc
def __sub__(self, other: str) -> "CachePolicy":
if not isinstance(other, str):
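The new `HashError` handling turns an opaque serialization failure into actionable guidance. A sketch of remediation 1 from that message: drop the unhashable argument (here a lock) from the key with a custom `cache_key_fn`:

```python
import threading

from prefect import task
from prefect.utilities.hashing import hash_objects


def key_without_lock(context, parameters):
    # Hash everything except the lock, which cannot be serialized.
    safe = {k: v for k, v in parameters.items() if k != "lock"}
    return hash_objects(safe)


@task(cache_key_fn=key_without_lock)
def guarded_work(data: list, lock: threading.Lock) -> int:
    with lock:
        return sum(data)
```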
diff --git a/src/prefect/cli/block.py b/src/prefect/cli/block.py
index 295fb86caa9e..808547eecb09 100644
--- a/src/prefect/cli/block.py
+++ b/src/prefect/cli/block.py
@@ -229,7 +229,7 @@ async def register(
block_catalog_url = f"{ui_url}/blocks/catalog"
msg = f"{msg.rstrip().rstrip('.')}: {block_catalog_url}\n"
- app.console.print(msg)
+ app.console.print(msg, soft_wrap=True)
@blocks_app.command("ls")
@@ -449,7 +449,7 @@ async def blocktype_delete(
try:
block_type = await client.read_block_type_by_slug(slug)
if is_interactive() and not typer.confirm(
- (f"Are you sure you want to delete block with id {id!r}?"),
+ (f"Are you sure you want to delete block type {block_type.slug!r}?"),
default=False,
):
exit_with_error("Deletion aborted.")
diff --git a/src/prefect/cli/cloud/__init__.py b/src/prefect/cli/cloud/__init__.py
index 46739a387fd9..ea63cac61fb9 100644
--- a/src/prefect/cli/cloud/__init__.py
+++ b/src/prefect/cli/cloud/__init__.py
@@ -3,7 +3,6 @@
"""
import os
-import signal
import traceback
import uuid
import urllib.parse
diff --git a/src/prefect/cli/config.py b/src/prefect/cli/config.py
index e6d43138bd86..aa9b7255ae4e 100644
--- a/src/prefect/cli/config.py
+++ b/src/prefect/cli/config.py
@@ -3,8 +3,10 @@
"""
import os
+from pathlib import Path
from typing import Any, Dict, List, Optional
+import toml
import typer
from dotenv import dotenv_values
from typing_extensions import Literal
@@ -16,6 +18,7 @@
from prefect.cli.root import app, is_interactive
from prefect.exceptions import ProfileSettingsValidationError
from prefect.settings.legacy import _get_settings_fields, _get_valid_setting_names
+from prefect.utilities.annotations import NotSet
from prefect.utilities.collections import listrepr
help_message = """
@@ -201,7 +204,9 @@ def view(
def _process_setting(
setting: prefect.settings.Setting,
value: str,
- source: Literal["env", "profile", "defaults", ".env file"],
+ source: Literal[
+ "env", "profile", "defaults", ".env file", "prefect.toml", "pyproject.toml"
+ ],
):
display_value = "********" if setting.is_secret and not show_secrets else value
source_blurb = f" (from {source})" if show_sources else ""
@@ -220,15 +225,23 @@ def _collect_defaults(default_values: Dict[str, Any], current_path: List[str]):
continue
_process_setting(setting, value, "defaults")
- # Process settings from the current profile
- for setting, value in current_profile_settings.items():
- value_and_source = (
- (value, "profile")
- if not (env_value := os.getenv(setting.name))
- else (env_value, "env")
- )
- _process_setting(setting, value_and_source[0], value_and_source[1])
+ def _process_toml_settings(
+ settings: Dict[str, Any],
+ base_path: List[str],
+ source: Literal["prefect.toml", "pyproject.toml"],
+ ):
+ for key, value in settings.items():
+ if isinstance(value, dict):
+ _process_toml_settings(value, base_path + [key], source)
+ else:
+ setting = _get_settings_fields(prefect.settings.Settings).get(
+ ".".join(base_path + [key]), NotSet
+ )
+ if setting is NotSet or setting.name in processed_settings:
+ continue
+ _process_setting(setting, value, source)
+ # Process settings from environment variables
for setting_name in VALID_SETTING_NAMES:
setting = _get_settings_fields(prefect.settings.Settings)[setting_name]
if setting.name in processed_settings:
@@ -236,12 +249,33 @@ def _collect_defaults(default_values: Dict[str, Any], current_path: List[str]):
if (env_value := os.getenv(setting.name)) is None:
continue
_process_setting(setting, env_value, "env")
- for key, value in dotenv_values().items():
+
+ # Process settings from .env file
+ for key, value in dotenv_values(".env").items():
if key in VALID_SETTING_NAMES:
setting = _get_settings_fields(prefect.settings.Settings)[key]
if setting.name in processed_settings or value is None:
continue
_process_setting(setting, value, ".env file")
+
+ # Process settings from prefect.toml
+ if Path("prefect.toml").exists():
+ toml_settings = toml.load(Path("prefect.toml"))
+ _process_toml_settings(toml_settings, base_path=[], source="prefect.toml")
+
+ # Process settings from pyproject.toml
+ if Path("pyproject.toml").exists():
+ pyproject_settings = toml.load(Path("pyproject.toml"))
+ pyproject_settings = pyproject_settings.get("tool", {}).get("prefect", {})
+ _process_toml_settings(
+ pyproject_settings, base_path=[], source="pyproject.toml"
+ )
+
+ # Process settings from the current profile
+ for setting, value in current_profile_settings.items():
+ if setting.name not in processed_settings:
+ _process_setting(setting, value, "profile")
+
if show_defaults:
_collect_defaults(
prefect.settings.Settings().model_dump(context=dump_context),
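
Both TOML sources walked above share the same nested-table shape; `prefect.toml` uses top-level tables while `pyproject.toml` nests them under `[tool.prefect]`. A small illustration with the `toml` library (the URL value is arbitrary):

import toml

prefect_toml = toml.loads("""
[api]
url = "http://127.0.0.1:4200/api"
""")

pyproject_toml = toml.loads("""
[tool.prefect.api]
url = "http://127.0.0.1:4200/api"
""")

# _process_toml_settings joins nested keys with "." to find the matching
# setting, e.g. {"api": {"url": ...}} -> "api.url"
assert prefect_toml["api"] == pyproject_toml["tool"]["prefect"]["api"]
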
diff --git a/src/prefect/cli/shell.py b/src/prefect/cli/shell.py
index 6ca13f50bf6c..6837e5d00950 100644
--- a/src/prefect/cli/shell.py
+++ b/src/prefect/cli/shell.py
@@ -8,7 +8,7 @@
import subprocess
import sys
import threading
-from typing import List, Optional
+from typing import Any, Dict, List, Optional
import typer
from typing_extensions import Annotated
@@ -62,6 +62,7 @@ def run_shell_process(
log_output: bool = True,
stream_stdout: bool = False,
log_stderr: bool = False,
+ popen_kwargs: Optional[Dict[str, Any]] = None,
):
"""
Executes the specified shell command and logs its output.
@@ -70,28 +71,32 @@ def run_shell_process(
It handles both the execution of the command and the collection of its output for logging purposes.
Args:
- command (str): The shell command to execute.
- log_output (bool, optional): If True, the output of the command (both stdout and stderr) is logged to Prefect.
- Defaults to True
- stream_stdout (bool, optional): If True, the stdout of the command is streamed to Prefect logs. Defaults to False.
- log_stderr (bool, optional): If True, the stderr of the command is logged to Prefect logs. Defaults to False.
-
+ command: The shell command to execute.
+ log_output: If True, the output of the command (both stdout and stderr) is logged to Prefect.
+ stream_stdout: If True, the stdout of the command is streamed to Prefect logs.
+ log_stderr: If True, the stderr of the command is logged to Prefect logs.
+ popen_kwargs: Additional keyword arguments to pass to the `subprocess.Popen` call.
"""
logger = get_run_logger() if log_output else logging.getLogger("prefect")
+ # Default Popen kwargs that can be overridden
+ kwargs = {
+ "stdout": subprocess.PIPE,
+ "stderr": subprocess.PIPE,
+ "shell": True,
+ "text": True,
+ "bufsize": 1,
+ "universal_newlines": True,
+ }
+
+ if popen_kwargs:
+ kwargs |= popen_kwargs
+
# Containers for log batching
stdout_container, stderr_container = [], []
- with subprocess.Popen(
- command,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- shell=True,
- text=True,
- bufsize=1,
- universal_newlines=True,
- ) as proc:
+ with subprocess.Popen(command, **kwargs) as proc:
# Create threads for collecting stdout and stderr
if stream_stdout:
stdout_logger = logger.info
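
A usage sketch for the new `popen_kwargs` escape hatch; the working directory is arbitrary, and `log_output=False` keeps the example free of a Prefect run context:

from prefect.cli.shell import run_shell_process

run_shell_process(
    "pwd",
    log_output=False,
    # merged over the defaults via `kwargs |= popen_kwargs`, so any default
    # (stdout, shell, text, ...) can also be overridden here
    popen_kwargs={"cwd": "/tmp"},
)
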
diff --git a/src/prefect/client/cloud.py b/src/prefect/client/cloud.py
index 5e359f80f2c5..38a69150e922 100644
--- a/src/prefect/client/cloud.py
+++ b/src/prefect/client/cloud.py
@@ -1,5 +1,6 @@
import re
from typing import Any, Dict, List, Optional, cast
+from uuid import UUID
import anyio
import httpx
@@ -21,6 +22,7 @@
PREFECT_CLOUD_API_URL,
PREFECT_TESTING_UNIT_TEST_MODE,
)
+from prefect.types import KeyValueLabels
PARSE_API_URL_REGEX = re.compile(r"accounts/(.{36})/workspaces/(.{36})")
@@ -151,6 +153,25 @@ async def check_ip_allowlist_access(self) -> IPAllowlistMyAccessResponse:
response = await self.get(f"{self.account_base_url}/ip_allowlist/my_access")
return IPAllowlistMyAccessResponse.model_validate(response)
+ async def update_flow_run_labels(
+ self, flow_run_id: UUID, labels: KeyValueLabels
+ ) -> httpx.Response:
+ """
+ Update the labels for a flow run.
+
+ Args:
+ flow_run_id: The identifier for the flow run to update.
+ labels: A dictionary of labels to update for the flow run.
+
+ Returns:
+ An `httpx.Response` object from the PATCH request.
+ """
+
+ return await self._client.patch(
+ f"{self.workspace_base_url}/flow_runs/{flow_run_id}/labels",
+ json=labels,
+ )
+
async def __aenter__(self):
await self._client.__aenter__()
return self
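
A usage sketch for the new method, assuming a Cloud client from `get_cloud_client` and an arbitrary flow run ID:

from uuid import UUID
from prefect.client.cloud import get_cloud_client

async def label_run(flow_run_id: UUID) -> None:
    async with get_cloud_client() as client:
        # label values may be strings, numbers, or booleans (KeyValueLabels)
        await client.update_flow_run_labels(
            flow_run_id, {"team": "data-eng", "priority": 1, "ad-hoc": True}
        )
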
diff --git a/src/prefect/client/orchestration.py b/src/prefect/client/orchestration.py
index 14db1730cfdf..f233983f3c93 100644
--- a/src/prefect/client/orchestration.py
+++ b/src/prefect/client/orchestration.py
@@ -99,6 +99,7 @@
TaskRunResult,
Variable,
Worker,
+ WorkerMetadata,
WorkPool,
WorkQueue,
WorkQueueStatusDetail,
@@ -134,6 +135,7 @@
PREFECT_CLOUD_API_URL,
PREFECT_SERVER_ALLOW_EPHEMERAL_MODE,
PREFECT_TESTING_UNIT_TEST_MODE,
+ get_current_settings,
)
if TYPE_CHECKING:
@@ -2594,22 +2596,44 @@ async def send_worker_heartbeat(
work_pool_name: str,
worker_name: str,
heartbeat_interval_seconds: Optional[float] = None,
- ):
+ get_worker_id: bool = False,
+ worker_metadata: Optional[WorkerMetadata] = None,
+ ) -> Optional[UUID]:
"""
Sends a worker heartbeat for a given work pool.
Args:
work_pool_name: The name of the work pool to heartbeat against.
worker_name: The name of the worker sending the heartbeat.
+ get_worker_id: Whether to return the worker ID. Note: the return value will be `None` if the connected server does not support returning worker IDs, even if `get_worker_id` is `True`.
+ worker_metadata: Metadata about the worker to send to the server.
"""
- await self._client.post(
+ params = {
+ "name": worker_name,
+ "heartbeat_interval_seconds": heartbeat_interval_seconds,
+ }
+ if worker_metadata:
+ params["metadata"] = worker_metadata.model_dump(mode="json")
+ if get_worker_id:
+ params["return_id"] = get_worker_id
+
+ resp = await self._client.post(
f"/work_pools/{work_pool_name}/workers/heartbeat",
- json={
- "name": worker_name,
- "heartbeat_interval_seconds": heartbeat_interval_seconds,
- },
+ json=params,
)
+ if (
+ (
+ self.server_type == ServerType.CLOUD
+ or get_current_settings().testing.test_mode
+ )
+ and get_worker_id
+ and resp.status_code == 200
+ ):
+ return UUID(resp.text)
+ else:
+ return None
+
async def read_workers_for_work_pool(
self,
work_pool_name: str,
@@ -3901,13 +3925,13 @@ def read_flow_run(self, flow_run_id: UUID) -> FlowRun:
def read_flow_runs(
self,
*,
- flow_filter: FlowFilter = None,
- flow_run_filter: FlowRunFilter = None,
- task_run_filter: TaskRunFilter = None,
- deployment_filter: DeploymentFilter = None,
- work_pool_filter: WorkPoolFilter = None,
- work_queue_filter: WorkQueueFilter = None,
- sort: FlowRunSort = None,
+ flow_filter: Optional[FlowFilter] = None,
+ flow_run_filter: Optional[FlowRunFilter] = None,
+ task_run_filter: Optional[TaskRunFilter] = None,
+ deployment_filter: Optional[DeploymentFilter] = None,
+ work_pool_filter: Optional[WorkPoolFilter] = None,
+ work_queue_filter: Optional[WorkQueueFilter] = None,
+ sort: Optional[FlowRunSort] = None,
limit: Optional[int] = None,
offset: int = 0,
) -> List[FlowRun]:
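
A sketch of a heartbeat that reports metadata and asks for the server-assigned worker ID; the pool and worker names are placeholders:

from prefect.client.orchestration import get_client
from prefect.client.schemas.objects import Integration, WorkerMetadata

async def heartbeat() -> None:
    async with get_client() as client:
        worker_id = await client.send_worker_heartbeat(
            work_pool_name="my-pool",
            worker_name="worker-1",
            get_worker_id=True,  # still returns None if unsupported by the server
            worker_metadata=WorkerMetadata(
                integrations=[Integration(name="prefect-aws", version="0.5.0")]
            ),
        )
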
diff --git a/src/prefect/client/schemas/actions.py b/src/prefect/client/schemas/actions.py
index c83755cbca4f..2e918f5d588b 100644
--- a/src/prefect/client/schemas/actions.py
+++ b/src/prefect/client/schemas/actions.py
@@ -562,6 +562,17 @@ class LogCreate(ActionBaseModel):
timestamp: DateTime = Field(default=..., description="The log timestamp.")
flow_run_id: Optional[UUID] = Field(None)
task_run_id: Optional[UUID] = Field(None)
+ worker_id: Optional[UUID] = Field(None)
+
+ def model_dump(self, *args, **kwargs):
+ """
+ The worker_id field is only included in logs sent to Prefect Cloud.
+ If it's unset, we should not include it in the log payload.
+ """
+ data = super().model_dump(*args, **kwargs)
+ if self.worker_id is None:
+ data.pop("worker_id")
+ return data
class WorkPoolCreate(ActionBaseModel):
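
A quick check of the override above: an unset `worker_id` is dropped from the serialized payload.

import pendulum
from prefect.client.schemas.actions import LogCreate

log = LogCreate(
    name="prefect.workers",
    level=20,
    message="hello",
    timestamp=pendulum.now("utc"),
)
assert "worker_id" not in log.model_dump()  # unset, so popped before sending
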
diff --git a/src/prefect/client/schemas/objects.py b/src/prefect/client/schemas/objects.py
index 1c354dc44567..5a01ad8d340d 100644
--- a/src/prefect/client/schemas/objects.py
+++ b/src/prefect/client/schemas/objects.py
@@ -32,6 +32,7 @@
from pydantic_extra_types.pendulum_dt import DateTime
from typing_extensions import Literal, Self, TypeVar
+from prefect._internal.compatibility import deprecated
from prefect._internal.compatibility.migration import getattr_migration
from prefect._internal.schemas.bases import ObjectBaseModel, PrefectBaseModel
from prefect._internal.schemas.fields import CreatedBy, UpdatedBy
@@ -52,6 +53,7 @@
from prefect.settings import PREFECT_CLOUD_API_URL, PREFECT_CLOUD_UI_URL
from prefect.types import (
MAX_VARIABLE_NAME_LENGTH,
+ KeyValueLabels,
Name,
NonNegativeInteger,
PositiveInteger,
@@ -186,7 +188,7 @@ class StateDetails(PrefectBaseModel):
def data_discriminator(x: Any) -> str:
- if isinstance(x, dict) and "type" in x:
+ if isinstance(x, dict) and "type" in x and x["type"] != "unpersisted":
return "BaseResult"
elif isinstance(x, dict) and "storage_key" in x:
return "ResultRecordMetadata"
@@ -220,10 +222,17 @@ def result(self: "State[R]", raise_on_failure: bool = True) -> R:
def result(self: "State[R]", raise_on_failure: bool = False) -> Union[R, Exception]:
...
+ @deprecated.deprecated_parameter(
+ "fetch",
+ when=lambda fetch: fetch is not True,
+ start_date="Oct 2024",
+ end_date="Jan 2025",
+ help="Please ensure you are awaiting the call to `result()` when calling in an async context.",
+ )
def result(
self,
raise_on_failure: bool = True,
- fetch: Optional[bool] = None,
+ fetch: bool = True,
retry_result_failure: bool = True,
) -> Union[R, Exception]:
"""
@@ -248,22 +257,6 @@ def result(
The result of the run
Examples:
- >>> from prefect import flow, task
- >>> @task
- >>> def my_task(x):
- >>> return x
-
- Get the result from a task future in a flow
-
- >>> @flow
- >>> def my_flow():
- >>> future = my_task("hello")
- >>> state = future.wait()
- >>> result = state.result()
- >>> print(result)
- >>> my_flow()
- hello
-
Get the result from a flow state
>>> @flow
@@ -307,7 +300,7 @@ def result(
>>> raise ValueError("oh no!")
>>> my_flow.deploy("my_deployment/my_flow")
>>> flow_run = run_deployment("my_deployment/my_flow")
- >>> await flow_run.state.result(raise_on_failure=True, fetch=True) # Raises `ValueError("oh no!")`
+ >>> await flow_run.state.result(raise_on_failure=True) # Raises `ValueError("oh no!")`
"""
from prefect.states import get_state_result
@@ -365,6 +358,12 @@ def default_scheduled_start_time(self) -> Self:
self.state_details.scheduled_time = DateTime.now("utc")
return self
+ @model_validator(mode="after")
+ def set_unpersisted_results_to_none(self) -> Self:
+ if isinstance(self.data, dict) and self.data.get("type") == "unpersisted":
+ self.data = None
+ return self
+
def is_scheduled(self) -> bool:
return self.type == StateType.SCHEDULED
@@ -494,6 +493,9 @@ class FlowRunPolicy(PrefectBaseModel):
resuming: Optional[bool] = Field(
default=False, description="Indicates if this run is resuming from a pause."
)
+ retry_type: Optional[Literal["in_process", "reschedule"]] = Field(
+ default=None, description="The type of retry this run is undergoing."
+ )
@model_validator(mode="before")
@classmethod
@@ -557,6 +559,11 @@ class FlowRun(ObjectBaseModel):
description="A list of tags on the flow run",
examples=[["tag-1", "tag-2"]],
)
+ labels: KeyValueLabels = Field(
+ default_factory=dict,
+ description="Prefect Cloud: A dictionary of key-value labels. Values can be strings, numbers, or booleans.",
+ examples=[{"key": "value1", "key2": 42}],
+ )
parent_task_run_id: Optional[UUID] = Field(
default=None,
description=(
@@ -1177,27 +1184,6 @@ class ConcurrencyLimit(ObjectBaseModel):
)
-class BlockSchema(ObjectBaseModel):
- """An ORM representation of a block schema."""
-
- checksum: str = Field(default=..., description="The block schema's unique checksum")
- fields: Dict[str, Any] = Field(
- default_factory=dict, description="The block schema's field schema"
- )
- block_type_id: Optional[UUID] = Field(default=..., description="A block type ID")
- block_type: Optional[BlockType] = Field(
- default=None, description="The associated block type"
- )
- capabilities: List[str] = Field(
- default_factory=list,
- description="A list of Block capabilities",
- )
- version: str = Field(
- default=DEFAULT_BLOCK_SCHEMA_VERSION,
- description="Human readable identifier for the block schema",
- )
-
-
class BlockSchemaReference(ObjectBaseModel):
"""An ORM representation of a block schema reference."""
@@ -1691,3 +1677,24 @@ class CsrfToken(ObjectBaseModel):
__getattr__ = getattr_migration(__name__)
+
+
+class Integration(PrefectBaseModel):
+ """A representation of an installed Prefect integration."""
+
+ name: str = Field(description="The name of the Prefect integration.")
+ version: str = Field(description="The version of the Prefect integration.")
+
+
+class WorkerMetadata(PrefectBaseModel):
+ """
+ Worker metadata.
+
+ We depend on the structure of `integrations`, but otherwise, worker classes
+ should support flexible metadata.
+ """
+
+ integrations: List[Integration] = Field(
+ default=..., description="Prefect integrations installed in the worker."
+ )
+ model_config = ConfigDict(extra="allow")
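
With `extra="allow"`, workers can attach fields beyond the required `integrations` list; the `cluster` field below is hypothetical:

from prefect.client.schemas.objects import Integration, WorkerMetadata

metadata = WorkerMetadata(
    integrations=[Integration(name="prefect-kubernetes", version="0.4.0")],
    cluster="us-east-1",  # extra fields are kept rather than rejected
)
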
diff --git a/src/prefect/context.py b/src/prefect/context.py
index f9eb82db01c1..f4ae26606196 100644
--- a/src/prefect/context.py
+++ b/src/prefect/context.py
@@ -38,7 +38,11 @@
from prefect.client.schemas import FlowRun, TaskRun
from prefect.events.worker import EventsWorker
from prefect.exceptions import MissingContextError
-from prefect.results import ResultStore, get_default_persist_setting
+from prefect.results import (
+ ResultStore,
+ get_default_persist_setting,
+ get_default_persist_setting_for_tasks,
+)
from prefect.settings import Profile, Settings
from prefect.settings.legacy import _get_settings_fields
from prefect.states import State
@@ -397,7 +401,7 @@ class TaskRunContext(RunContext):
# Result handling
result_store: ResultStore
- persist_result: bool = Field(default_factory=get_default_persist_setting)
+ persist_result: bool = Field(default_factory=get_default_persist_setting_for_tasks)
__var__ = ContextVar("task_run")
diff --git a/src/prefect/deployments/runner.py b/src/prefect/deployments/runner.py
index f7af534ce7d3..56b7af2d5682 100644
--- a/src/prefect/deployments/runner.py
+++ b/src/prefect/deployments/runner.py
@@ -77,6 +77,7 @@ def fast_flow():
PREFECT_DEFAULT_WORK_POOL_NAME,
PREFECT_UI_URL,
)
+from prefect.types import ListOfNonEmptyStrings
from prefect.types.entrypoint import EntrypointType
from prefect.utilities.asyncutils import sync_compatible
from prefect.utilities.callables import ParameterSchema, parameter_schema
@@ -140,7 +141,7 @@ class RunnerDeployment(BaseModel):
version: Optional[str] = Field(
default=None, description="An optional version for the deployment."
)
- tags: List[str] = Field(
+ tags: ListOfNonEmptyStrings = Field(
default_factory=list,
description="One of more tags to apply to this deployment.",
)
diff --git a/src/prefect/events/filters.py b/src/prefect/events/filters.py
index 1683c9036b2a..9143c43a8689 100644
--- a/src/prefect/events/filters.py
+++ b/src/prefect/events/filters.py
@@ -83,17 +83,18 @@ def includes(self, event: Event) -> bool:
class EventNameFilter(EventDataFilter):
prefix: Optional[List[str]] = Field(
- None, description="Only include events matching one of these prefixes"
+ default=None, description="Only include events matching one of these prefixes"
)
exclude_prefix: Optional[List[str]] = Field(
- None, description="Exclude events matching one of these prefixes"
+ default=None, description="Exclude events matching one of these prefixes"
)
name: Optional[List[str]] = Field(
- None, description="Only include events matching one of these names exactly"
+ default=None,
+ description="Only include events matching one of these names exactly",
)
exclude_name: Optional[List[str]] = Field(
- None, description="Exclude events matching one of these names exactly"
+ default=None, description="Exclude events matching one of these names exactly"
)
def includes(self, event: Event) -> bool:
@@ -230,17 +231,20 @@ class EventFilter(EventDataFilter):
description="Filter criteria for when the events occurred",
)
event: Optional[EventNameFilter] = Field(
- None,
+ default=None,
description="Filter criteria for the event name",
)
any_resource: Optional[EventAnyResourceFilter] = Field(
- None, description="Filter criteria for any resource involved in the event"
+ default=None,
+ description="Filter criteria for any resource involved in the event",
)
resource: Optional[EventResourceFilter] = Field(
- None, description="Filter criteria for the resource of the event"
+ default=None,
+ description="Filter criteria for the resource of the event",
)
related: Optional[EventRelatedFilter] = Field(
- None, description="Filter criteria for the related resources of the event"
+ default=None,
+ description="Filter criteria for the related resources of the event",
)
id: EventIDFilter = Field(
default_factory=lambda: EventIDFilter(id=[]),
@@ -248,6 +252,6 @@ class EventFilter(EventDataFilter):
)
order: EventOrder = Field(
- EventOrder.DESC,
+ default=EventOrder.DESC,
description="The order to return filtered events",
)
diff --git a/src/prefect/exceptions.py b/src/prefect/exceptions.py
index 8ea4c82fb45c..d30a52756cae 100644
--- a/src/prefect/exceptions.py
+++ b/src/prefect/exceptions.py
@@ -443,3 +443,7 @@ class ProfileSettingsValidationError(PrefectException):
def __init__(self, errors: List[Tuple[Any, ValidationError]]) -> None:
self.errors = errors
+
+
+class HashError(PrefectException):
+ """Raised when hashing objects fails"""
diff --git a/src/prefect/flow_engine.py b/src/prefect/flow_engine.py
index 7d982df61f25..1463afe6fedb 100644
--- a/src/prefect/flow_engine.py
+++ b/src/prefect/flow_engine.py
@@ -22,8 +22,11 @@
)
from uuid import UUID
+from opentelemetry import trace
+from opentelemetry.trace import Tracer, get_tracer
from typing_extensions import ParamSpec
+import prefect
from prefect import Task
from prefect.client.orchestration import SyncPrefectClient, get_client
from prefect.client.schemas import FlowRun, TaskRun
@@ -124,6 +127,10 @@ class FlowRunEngine(Generic[P, R]):
_client: Optional[SyncPrefectClient] = None
short_circuit: bool = False
_flow_run_name_set: bool = False
+ _tracer: Tracer = field(
+ default_factory=lambda: get_tracer("prefect", prefect.__version__)
+ )
+ _span: Optional[trace.Span] = None
def __post_init__(self):
if self.flow is None and self.flow_run_id is None:
@@ -233,6 +240,17 @@ def set_state(self, state: State, force: bool = False) -> State:
self.flow_run.state = state # type: ignore
self.flow_run.state_name = state.name # type: ignore
self.flow_run.state_type = state.type # type: ignore
+
+ if self._span:
+ self._span.add_event(
+ state.name,
+ {
+ "prefect.state.message": state.message or "",
+ "prefect.state.type": state.type,
+ "prefect.state.name": state.name or state.type,
+ "prefect.state.id": str(state.id),
+ },
+ )
return state
def result(self, raise_on_failure: bool = True) -> "Union[R, State, None]":
@@ -281,6 +299,9 @@ def handle_success(self, result: R) -> R:
)
self.set_state(terminal_state)
self._return_value = resolved_result
+
+ self._end_span_on_success()
+
return result
def handle_exception(
@@ -311,6 +332,9 @@ def handle_exception(
)
state = self.set_state(Running())
self._raised = exc
+
+ self._end_span_on_error(exc, state.message)
+
return state
def handle_timeout(self, exc: TimeoutError) -> None:
@@ -329,6 +353,8 @@ def handle_timeout(self, exc: TimeoutError) -> None:
self.set_state(state)
self._raised = exc
+ self._end_span_on_error(exc, message)
+
def handle_crash(self, exc: BaseException) -> None:
state = run_coro_as_sync(exception_to_crashed_state(exc))
self.logger.error(f"Crash detected! {state.message}")
@@ -336,6 +362,23 @@ def handle_crash(self, exc: BaseException) -> None:
self.set_state(state, force=True)
self._raised = exc
+ self._end_span_on_error(exc, state.message)
+
+ def _end_span_on_success(self):
+ if not self._span:
+ return
+ self._span.set_status(trace.Status(trace.StatusCode.OK))
+ self._span.end(time.time_ns())
+ self._span = None
+
+ def _end_span_on_error(self, exc: BaseException, description: Optional[str]):
+ if not self._span:
+ return
+ self._span.record_exception(exc)
+ self._span.set_status(trace.Status(trace.StatusCode.ERROR, description))
+ self._span.end(time.time_ns())
+ self._span = None
+
def load_subflow_run(
self,
parent_task_run: TaskRun,
@@ -578,6 +621,18 @@ def initialize_run(self):
flow_version=self.flow.version,
empirical_policy=self.flow_run.empirical_policy,
)
+
+ self._span = self._tracer.start_span(
+ name=self.flow_run.name,
+ attributes={
+ **self.flow_run.labels,
+ "prefect.run.type": "flow",
+ "prefect.run.id": str(self.flow_run.id),
+ "prefect.tags": self.flow_run.tags,
+ "prefect.flow.name": self.flow.name,
+ },
+ )
+
try:
yield self
@@ -632,7 +687,7 @@ def cancel_all_tasks(self):
@contextmanager
def start(self) -> Generator[None, None, None]:
- with self.initialize_run():
+ with self.initialize_run(), trace.use_span(self._span):
self.begin_run()
if self.state.is_running():
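
The span lifecycle the engine now follows, reduced to bare OpenTelemetry calls; the tracer version and attribute values are illustrative:

import time
from opentelemetry import trace

tracer = trace.get_tracer("prefect", "3.x")
span = tracer.start_span(name="my-flow-run", attributes={"prefect.run.type": "flow"})
with trace.use_span(span, end_on_exit=False):
    span.add_event("Running", {"prefect.state.type": "RUNNING"})
    ...  # run the flow
    span.set_status(trace.Status(trace.StatusCode.OK))
    span.end(time.time_ns())
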
diff --git a/src/prefect/logging/handlers.py b/src/prefect/logging/handlers.py
index 863cac5a8cda..0e254a9e58a9 100644
--- a/src/prefect/logging/handlers.py
+++ b/src/prefect/logging/handlers.py
@@ -180,6 +180,7 @@ def prepare(self, record: logging.LogRecord) -> Dict[str, Any]:
"""
flow_run_id = getattr(record, "flow_run_id", None)
task_run_id = getattr(record, "task_run_id", None)
+ worker_id = getattr(record, "worker_id", None)
if not flow_run_id:
try:
@@ -215,6 +216,7 @@ def prepare(self, record: logging.LogRecord) -> Dict[str, Any]:
log = LogCreate(
flow_run_id=flow_run_id if is_uuid_like else None,
task_run_id=task_run_id,
+ worker_id=worker_id,
name=record.name,
level=record.levelno,
timestamp=pendulum.from_timestamp(
@@ -236,6 +238,46 @@ def _get_payload_size(self, log: Dict[str, Any]) -> int:
return len(json.dumps(log).encode())
+class WorkerAPILogHandler(APILogHandler):
+ def emit(self, record: logging.LogRecord):
+ # Open-source API servers do not currently support worker logs, and
+ # worker logs only have an associated worker ID when connected to Cloud,
+ # so we won't send worker logs to the API unless they have a worker ID.
+ if not getattr(record, "worker_id", None):
+ return
+ super().emit(record)
+
+ def prepare(self, record: logging.LogRecord) -> Dict[str, Any]:
+ """
+ Convert a `logging.LogRecord` to the API `LogCreate` schema and serialize.
+
+ This will add the worker ID to the log.
+
+ Logs exceeding the maximum size will be dropped.
+ """
+
+ worker_id = getattr(record, "worker_id", None)
+
+ log = LogCreate(
+ worker_id=worker_id,
+ name=record.name,
+ level=record.levelno,
+ timestamp=pendulum.from_timestamp(
+ getattr(record, "created", None) or time.time()
+ ),
+ message=self.format(record),
+ ).model_dump(mode="json")
+
+ log_size = log["__payload_size__"] = self._get_payload_size(log)
+ if log_size > PREFECT_LOGGING_TO_API_MAX_LOG_SIZE.value():
+ raise ValueError(
+ f"Log of size {log_size} is greater than the max size of "
+ f"{PREFECT_LOGGING_TO_API_MAX_LOG_SIZE.value()}"
+ )
+
+ return log
+
+
class PrefectConsoleHandler(logging.StreamHandler):
def __init__(
self,
diff --git a/src/prefect/logging/loggers.py b/src/prefect/logging/loggers.py
index 724574402860..0f6d8b6f0a2b 100644
--- a/src/prefect/logging/loggers.py
+++ b/src/prefect/logging/loggers.py
@@ -12,6 +12,7 @@
import prefect
from prefect.exceptions import MissingContextError
from prefect.logging.filters import ObfuscateApiKeyFilter
+from prefect.telemetry.logging import add_telemetry_log_handler
if TYPE_CHECKING:
from prefect.client.schemas import FlowRun as ClientFlowRun
@@ -19,6 +20,7 @@
from prefect.context import RunContext
from prefect.flows import Flow
from prefect.tasks import Task
+ from prefect.workers.base import BaseWorker
class PrefectLogAdapter(logging.LoggerAdapter):
@@ -75,6 +77,8 @@ def get_logger(name: Optional[str] = None) -> logging.Logger:
obfuscate_api_key_filter = ObfuscateApiKeyFilter()
logger.addFilter(obfuscate_api_key_filter)
+ add_telemetry_log_handler(logger=logger)
+
return logger
@@ -136,6 +140,12 @@ def get_run_logger(
else:
raise MissingContextError("There is no active flow or task run context.")
+ if isinstance(logger, logging.LoggerAdapter):
+ assert isinstance(logger.logger, logging.Logger)
+ add_telemetry_log_handler(logger.logger)
+ else:
+ add_telemetry_log_handler(logger)
+
return logger
@@ -205,6 +215,29 @@ def task_run_logger(
)
+def get_worker_logger(worker: "BaseWorker", name: Optional[str] = None):
+ """
+ Create a worker logger with the worker's metadata attached.
+
+ If the worker has a backend_id, it will be attached to the log records.
+ Otherwise, a basic logger will be returned.
+ """
+
+ worker_log_name = name or f"workers.{worker.__class__.type}.{worker.name.lower()}"
+
+ worker_id = getattr(worker, "backend_id", None)
+ if worker_id:
+ return PrefectLogAdapter(
+ get_logger(worker_log_name),
+ extra={
+ "worker_id": str(worker.backend_id),
+ },
+ )
+ else:
+ return get_logger(worker_log_name)
+
+
@contextmanager
def disable_logger(name: str):
"""
diff --git a/src/prefect/logging/logging.yml b/src/prefect/logging/logging.yml
index 5ac46a0eb1d5..c38d740c5622 100644
--- a/src/prefect/logging/logging.yml
+++ b/src/prefect/logging/logging.yml
@@ -69,6 +69,10 @@ handlers:
class: logging.StreamHandler
formatter: debug
+ worker_api:
+ level: 0
+ class: prefect.logging.handlers.WorkerAPILogHandler
+
loggers:
prefect:
level: "${PREFECT_LOGGING_LEVEL}"
@@ -86,6 +90,10 @@ loggers:
level: NOTSET
handlers: [api]
+ prefect.workers:
+ level: NOTSET
+ handlers: [worker_api]
+
prefect.server:
level: "${PREFECT_SERVER_LOGGING_LEVEL}"
@@ -102,9 +110,13 @@ loggers:
uvicorn:
level: "${PREFECT_SERVER_LOGGING_LEVEL}"
+ handlers: [console]
+ propagate: false
fastapi:
level: "${PREFECT_SERVER_LOGGING_LEVEL}"
+ handlers: [console]
+ propagate: false
# The root logger: any logger without propagation disabled sends to here as well
root:
diff --git a/src/prefect/main.py b/src/prefect/main.py
index 27757e156b2f..46ffa2d3d180 100644
--- a/src/prefect/main.py
+++ b/src/prefect/main.py
@@ -57,6 +57,11 @@
f"Using profile {prefect.context.get_settings_context().profile.name!r}"
)
+# Configure telemetry
+import prefect.telemetry.bootstrap
+
+prefect.telemetry.bootstrap.setup_telemetry()
+
from prefect._internal.compatibility.deprecated import (
inject_renamed_module_alias_finder,
diff --git a/src/prefect/results.py b/src/prefect/results.py
index d0a5cdc1ea71..dd17f614953d 100644
--- a/src/prefect/results.py
+++ b/src/prefect/results.py
@@ -56,13 +56,7 @@
from prefect.locking.protocol import LockManager
from prefect.logging import get_logger
from prefect.serializers import PickleSerializer, Serializer
-from prefect.settings import (
- PREFECT_DEFAULT_RESULT_STORAGE_BLOCK,
- PREFECT_LOCAL_STORAGE_PATH,
- PREFECT_RESULTS_DEFAULT_SERIALIZER,
- PREFECT_RESULTS_PERSIST_BY_DEFAULT,
- PREFECT_TASK_SCHEDULING_DEFAULT_STORAGE_BLOCK,
-)
+from prefect.settings.context import get_current_settings
from prefect.utilities.annotations import NotSet
from prefect.utilities.asyncutils import sync_compatible
from prefect.utilities.pydantic import get_dispatch_key, lookup_type, register_base_type
@@ -94,8 +88,9 @@ async def get_default_result_storage() -> WritableFileSystem:
"""
Generate a default file system for result storage.
"""
- default_block = PREFECT_DEFAULT_RESULT_STORAGE_BLOCK.value()
- basepath = PREFECT_LOCAL_STORAGE_PATH.value()
+ settings = get_current_settings()
+ default_block = settings.results.default_storage_block
+ basepath = settings.results.local_storage_path
cache_key = (str(default_block), str(basepath))
@@ -169,13 +164,14 @@ async def get_or_create_default_task_scheduling_storage() -> ResultStorage:
"""
Generate a default file system for background task parameter/result storage.
"""
- default_block = PREFECT_TASK_SCHEDULING_DEFAULT_STORAGE_BLOCK.value()
+ settings = get_current_settings()
+ default_block = settings.tasks.scheduling.default_storage_block
if default_block is not None:
return await Block.load(default_block)
# otherwise, use the local file system
- basepath = PREFECT_LOCAL_STORAGE_PATH.value()
+ basepath = settings.results.local_storage_path
return LocalFileSystem(basepath=basepath)
@@ -183,22 +179,36 @@ def get_default_result_serializer() -> Serializer:
"""
Generate a default serializer for results.
"""
- return resolve_serializer(PREFECT_RESULTS_DEFAULT_SERIALIZER.value())
+ settings = get_current_settings()
+ return resolve_serializer(settings.results.default_serializer)
def get_default_persist_setting() -> bool:
"""
- Return the default option for result persistence (False).
+ Return the default option for result persistence.
+ """
+ settings = get_current_settings()
+ return settings.results.persist_by_default
+
+
+def get_default_persist_setting_for_tasks() -> bool:
+ """
+ Return the default option for result persistence for tasks.
"""
- return PREFECT_RESULTS_PERSIST_BY_DEFAULT.value()
+ settings = get_current_settings()
+ return (
+ settings.tasks.default_persist_result
+ if settings.tasks.default_persist_result is not None
+ else settings.results.persist_by_default
+ )
def should_persist_result() -> bool:
"""
Return the default option for result persistence determined by the current run context.
- If there is no current run context, the default value set by
- `PREFECT_RESULTS_PERSIST_BY_DEFAULT` will be returned.
+ If there is no current run context, the value of `results.persist_by_default` on the
+ current settings will be returned.
"""
from prefect.context import FlowRunContext, TaskRunContext
@@ -209,7 +219,7 @@ def should_persist_result() -> bool:
if flow_run_context is not None:
return flow_run_context.persist_result
- return PREFECT_RESULTS_PERSIST_BY_DEFAULT.value()
+ return get_default_persist_setting()
def _format_user_supplied_storage_key(key: str) -> str:
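
The fallback implemented above, in isolation: a `None` task-level setting defers to the results default.

from typing import Optional

def default_persist_for_tasks(task_level: Optional[bool], results_default: bool) -> bool:
    # mirrors get_default_persist_setting_for_tasks()
    return task_level if task_level is not None else results_default

assert default_persist_for_tasks(None, False) is False
assert default_persist_for_tasks(True, False) is True
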
diff --git a/src/prefect/server/api/server.py b/src/prefect/server/api/server.py
index e9f2ac106d47..6feab7308de7 100644
--- a/src/prefect/server/api/server.py
+++ b/src/prefect/server/api/server.py
@@ -477,7 +477,7 @@ def create_app(
ignore_cache: bool = False,
) -> FastAPI:
"""
- Create an FastAPI app that includes the Prefect REST API and UI
+ Create a FastAPI app that includes the Prefect REST API and UI
Args:
settings: The settings to use to create the app. If not set, settings are pulled
diff --git a/src/prefect/server/api/variables.py b/src/prefect/server/api/variables.py
index a2ff7fed276a..7b755d6b8b52 100644
--- a/src/prefect/server/api/variables.py
+++ b/src/prefect/server/api/variables.py
@@ -5,6 +5,7 @@
from typing import List, Optional
from uuid import UUID
+import sqlalchemy as sa
from fastapi import Body, Depends, HTTPException, Path, status
from sqlalchemy.ext.asyncio import AsyncSession
@@ -50,9 +51,15 @@ async def create_variable(
db: PrefectDBInterface = Depends(provide_database_interface),
) -> core.Variable:
async with db.session_context(begin_transaction=True) as session:
- model = await models.variables.create_variable(
- session=session, variable=variable
- )
+ try:
+ model = await models.variables.create_variable(
+ session=session, variable=variable
+ )
+ except sa.exc.IntegrityError:
+ raise HTTPException(
+ status_code=409,
+ detail=f"A variable with the name {variable.name!r} already exists.",
+ )
return core.Variable.model_validate(model, from_attributes=True)
diff --git a/src/prefect/server/database/configurations.py b/src/prefect/server/database/configurations.py
index 1d40a44cf776..9721f16d7dfc 100644
--- a/src/prefect/server/database/configurations.py
+++ b/src/prefect/server/database/configurations.py
@@ -204,7 +204,6 @@ async def engine(self) -> AsyncEngine:
connect_args["timeout"] = self.connection_timeout
if connect_args:
- connect_args["server_settings"] = {"jit": "off"}
kwargs["connect_args"] = connect_args
if self.sqlalchemy_pool_size is not None:
diff --git a/src/prefect/server/database/migrations/versions/sqlite/2022_04_23_114831_fd966d4ad99c_rename_block_to_blockbasis_and_.py b/src/prefect/server/database/migrations/versions/sqlite/2022_04_23_114831_fd966d4ad99c_rename_block_to_blockbasis_and_.py
index a80a2d91514f..beb48e8ec5ec 100644
--- a/src/prefect/server/database/migrations/versions/sqlite/2022_04_23_114831_fd966d4ad99c_rename_block_to_blockbasis_and_.py
+++ b/src/prefect/server/database/migrations/versions/sqlite/2022_04_23_114831_fd966d4ad99c_rename_block_to_blockbasis_and_.py
@@ -16,22 +16,30 @@
def upgrade():
+ # First drop the foreign key constraints
+ with op.batch_alter_table("block", schema=None) as batch_op:
+ batch_op.drop_constraint("fk_block__block_spec_id__block_spec")
+
+ # Then rename the tables
op.rename_table("block_spec", "block_schema")
op.rename_table("block", "block_document")
+ # Handle indexes and column renames for block_document
with op.batch_alter_table("block_document", schema=None) as batch_op:
+ # Drop indexes first
batch_op.drop_index("ix_block__is_default_storage_block")
batch_op.drop_index("ix_block__name")
batch_op.drop_index("ix_block__updated")
batch_op.drop_index("uq_block__spec_id_name")
+
+ # Rename columns
batch_op.alter_column("block_spec_id", new_column_name="block_schema_id")
batch_op.alter_column(
"is_default_storage_block",
new_column_name="is_default_storage_block_document",
)
- batch_op.drop_constraint("fk_block__block_spec_id__block_spec")
- batch_op.drop_constraint("pk_block_data")
+ # Create new indexes
with op.batch_alter_table("block_document", schema=None) as batch_op:
batch_op.create_index(
batch_op.f("ix_block_document__is_default_storage_block_document"),
@@ -48,6 +56,15 @@ def upgrade():
"uq_block__schema_id_name", ["block_schema_id", "name"], unique=True
)
+ # Re-create foreign key at the end
+ batch_op.create_foreign_key(
+ batch_op.f("fk_block__block_schema_id__block_schema"),
+ "block_schema",
+ ["block_schema_id"],
+ ["id"],
+ ondelete="cascade",
+ )
+
with op.batch_alter_table("block_schema", schema=None) as batch_op:
batch_op.drop_index("ix_block_spec__type")
batch_op.drop_index("ix_block_spec__updated")
diff --git a/src/prefect/server/database/migrations/versions/sqlite/2024_09_16_162719_4ad4658cbefe_add_deployment_to_global_concurrency_.py b/src/prefect/server/database/migrations/versions/sqlite/2024_09_16_162719_4ad4658cbefe_add_deployment_to_global_concurrency_.py
index bd3a6c197ebb..edb13553a91f 100644
--- a/src/prefect/server/database/migrations/versions/sqlite/2024_09_16_162719_4ad4658cbefe_add_deployment_to_global_concurrency_.py
+++ b/src/prefect/server/database/migrations/versions/sqlite/2024_09_16_162719_4ad4658cbefe_add_deployment_to_global_concurrency_.py
@@ -5,6 +5,7 @@
Create Date: 2024-09-16 16:27:19.451150
"""
+
import sqlalchemy as sa
from alembic import op
@@ -37,16 +38,18 @@ def upgrade():
# migrate existing data
sql = sa.text(
"""
- WITH deployment_limit_mapping AS (
- SELECT d.id AS deployment_id, l.id AS limit_id
- FROM deployment d
- JOIN concurrency_limit_v2 l ON l.name = 'deployment:' || d.id
- )
- UPDATE deployment
- SET concurrency_limit_id = dlm.limit_id
- FROM deployment_limit_mapping dlm
- WHERE deployment.id = dlm.deployment_id;
- """
+ UPDATE deployment
+ SET concurrency_limit_id = (
+ SELECT l.id
+ FROM concurrency_limit_v2 l
+ WHERE l.name = 'deployment:' || deployment.id
+ )
+ WHERE EXISTS (
+ SELECT 1
+ FROM concurrency_limit_v2 l
+ WHERE l.name = 'deployment:' || deployment.id
+ );
+ """
)
op.execute(sql)
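
`UPDATE ... FROM` requires SQLite >= 3.33, so the migration is rewritten as a correlated subquery that older engines accept. A toy reproduction with placeholder rows:

import sqlite3

con = sqlite3.connect(":memory:")
con.executescript("""
CREATE TABLE deployment (id TEXT, concurrency_limit_id TEXT);
CREATE TABLE concurrency_limit_v2 (id TEXT, name TEXT);
INSERT INTO deployment VALUES ('d1', NULL);
INSERT INTO concurrency_limit_v2 VALUES ('l1', 'deployment:d1');
""")
con.execute("""
UPDATE deployment
SET concurrency_limit_id = (
    SELECT l.id FROM concurrency_limit_v2 l
    WHERE l.name = 'deployment:' || deployment.id
)
WHERE EXISTS (
    SELECT 1 FROM concurrency_limit_v2 l
    WHERE l.name = 'deployment:' || deployment.id
);
""")
assert con.execute("SELECT concurrency_limit_id FROM deployment").fetchone()[0] == "l1"
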
diff --git a/src/prefect/server/database/query_components.py b/src/prefect/server/database/query_components.py
index 0c469ec27859..6aaad5c76251 100644
--- a/src/prefect/server/database/query_components.py
+++ b/src/prefect/server/database/query_components.py
@@ -947,8 +947,15 @@ async def flow_run_graph_v2(
end_time=row.end_time,
parents=[Edge(id=id) for id in row.parent_ids or []],
children=[Edge(id=id) for id in row.child_ids or []],
+ # ensure encapsulating_ids is deduplicated
+ # so parents only show up once
encapsulating=[
- Edge(id=id) for id in row.encapsulating_ids or []
+ Edge(id=id)
+ for id in (
+ list(set(row.encapsulating_ids))
+ if row.encapsulating_ids
+ else []
+ )
],
artifacts=graph_artifacts.get(row.id, []),
),
@@ -1427,7 +1434,13 @@ def time(
end_time=time(row.end_time),
parents=edges(row.parent_ids),
children=edges(row.child_ids),
- encapsulating=edges(row.encapsulating_ids),
+ # ensure encapsulating_ids is deduplicated
+ # so parents only show up once
+ encapsulating=edges(
+ list(set(row.encapsulating_ids.split(",")))
+ if row.encapsulating_ids
+ else None
+ ),
artifacts=graph_artifacts.get(UUID(row.id), []),
),
)
diff --git a/src/prefect/server/database/sql/postgres/get-runs-from-worker-queues.sql.jinja b/src/prefect/server/database/sql/postgres/get-runs-from-worker-queues.sql.jinja
index f165d9770116..c5636b721877 100644
--- a/src/prefect/server/database/sql/postgres/get-runs-from-worker-queues.sql.jinja
+++ b/src/prefect/server/database/sql/postgres/get-runs-from-worker-queues.sql.jinja
@@ -65,6 +65,7 @@ FROM
WHERE
fr.work_queue_id = wq.id
AND fr.state_type = 'SCHEDULED'
+ AND (fr.empirical_policy->>'retry_type' IS NULL OR fr.empirical_policy->>'retry_type' != 'in_process')
{% if scheduled_after %}
AND fr.next_scheduled_start_time >= :scheduled_after
{% endif %}
diff --git a/src/prefect/server/database/sql/sqlite/get-runs-from-worker-queues.sql.jinja b/src/prefect/server/database/sql/sqlite/get-runs-from-worker-queues.sql.jinja
index 253464fe496c..366fd8ef2135 100644
--- a/src/prefect/server/database/sql/sqlite/get-runs-from-worker-queues.sql.jinja
+++ b/src/prefect/server/database/sql/sqlite/get-runs-from-worker-queues.sql.jinja
@@ -53,6 +53,7 @@ scheduled_flow_runs AS (
ROW_NUMBER() OVER (PARTITION BY work_queue_id ORDER BY next_scheduled_start_time) AS work_queue_rank
FROM flow_run fr
WHERE fr.state_type = 'SCHEDULED'
+ AND (json_extract(fr.empirical_policy, '$.retry_type') IS NULL OR json_extract(fr.empirical_policy, '$.retry_type') != 'in_process')
{% if scheduled_after %}
AND fr.next_scheduled_start_time >= :scheduled_after
{% endif %}
diff --git a/src/prefect/server/orchestration/core_policy.py b/src/prefect/server/orchestration/core_policy.py
index 610173b2f92f..ac9281597d61 100644
--- a/src/prefect/server/orchestration/core_policy.py
+++ b/src/prefect/server/orchestration/core_policy.py
@@ -595,6 +595,7 @@ async def before_transition(
updated_policy = context.run.empirical_policy.model_dump()
updated_policy["resuming"] = False
updated_policy["pause_keys"] = set()
+ updated_policy["retry_type"] = "in_process"
context.run.empirical_policy = core.FlowRunPolicy(**updated_policy)
# Generate a new state for the flow
@@ -1067,6 +1068,10 @@ async def before_transition(
updated_policy = context.run.empirical_policy.model_dump()
updated_policy["resuming"] = False
updated_policy["pause_keys"] = set()
+ if proposed_state.is_scheduled():
+ updated_policy["retry_type"] = "reschedule"
+ else:
+ updated_policy["retry_type"] = None
context.run.empirical_policy = core.FlowRunPolicy(**updated_policy)
async def cleanup(
diff --git a/src/prefect/server/schemas/core.py b/src/prefect/server/schemas/core.py
index b80560ae2f81..c6d3a692fb24 100644
--- a/src/prefect/server/schemas/core.py
+++ b/src/prefect/server/schemas/core.py
@@ -93,8 +93,6 @@ class Flow(ORMBaseModel):
class FlowRunPolicy(PrefectBaseModel):
"""Defines of how a flow run should retry."""
- # TODO: Determine how to separate between infrastructure and within-process level
- # retries
max_retries: int = Field(
default=0,
description=(
@@ -121,6 +119,9 @@ class FlowRunPolicy(PrefectBaseModel):
resuming: Optional[bool] = Field(
default=False, description="Indicates if this run is resuming from a pause."
)
+ retry_type: Optional[Literal["in_process", "reschedule"]] = Field(
+ default=None, description="The type of retry this run is undergoing."
+ )
@model_validator(mode="before")
def populate_deprecated_fields(cls, values):
diff --git a/src/prefect/server/utilities/messaging/memory.py b/src/prefect/server/utilities/messaging/memory.py
index 1d151cb6a660..e971dca00b46 100644
--- a/src/prefect/server/utilities/messaging/memory.py
+++ b/src/prefect/server/utilities/messaging/memory.py
@@ -1,7 +1,9 @@
import asyncio
+import copy
from contextlib import asynccontextmanager
-from dataclasses import dataclass
+from dataclasses import asdict, dataclass
from datetime import timedelta
+from pathlib import Path
from typing import (
Any,
AsyncGenerator,
@@ -12,8 +14,11 @@
TypeVar,
Union,
)
+from uuid import uuid4
+import anyio
from cachetools import TTLCache
+from pydantic_core import to_json
from typing_extensions import Self
from prefect.logging import get_logger
@@ -21,6 +26,7 @@
from prefect.server.utilities.messaging import Consumer as _Consumer
from prefect.server.utilities.messaging import Message, MessageHandler, StopConsumer
from prefect.server.utilities.messaging import Publisher as _Publisher
+from prefect.settings.context import get_current_settings
logger = get_logger(__name__)
@@ -29,29 +35,101 @@
class MemoryMessage:
data: Union[bytes, str]
attributes: Dict[str, Any]
+ retry_count: int = 0
class Subscription:
- topic: "Topic"
- _queue: asyncio.Queue
- _retry: asyncio.Queue
-
- def __init__(self, topic: "Topic") -> None:
+ """
+ A subscription to a topic.
+
+ Messages are delivered to the subscription's queue and retried up to a
+ maximum number of times. If a message cannot be delivered after the maximum
+ number of retries, it is moved to the dead letter queue.
+
+ The dead letter queue is a directory of JSON files containing the serialized
+ message.
+
+ Messages remain in the dead letter queue until they are removed manually.
+
+ Attributes:
+ topic: The topic that the subscription receives messages from.
+ max_retries: The maximum number of times a message will be retried for
+ this subscription.
+ dead_letter_queue_path: The path to the dead letter queue folder.
+ """
+
+ def __init__(
+ self,
+ topic: "Topic",
+ max_retries: int = 3,
+ dead_letter_queue_path: Union[Path, str, None] = None,
+ ) -> None:
self.topic = topic
+ self.max_retries = max_retries
+ self.dead_letter_queue_path = (
+ Path(dead_letter_queue_path)
+ if dead_letter_queue_path
+ else get_current_settings().home / "dlq"
+ )
self._queue = asyncio.Queue()
self._retry = asyncio.Queue()
async def deliver(self, message: MemoryMessage) -> None:
+ """
+ Deliver a message to the subscription's queue.
+
+ Args:
+ message: The message to deliver.
+ """
await self._queue.put(message)
async def retry(self, message: MemoryMessage) -> None:
- await self._retry.put(message)
+ """
+ Place a message back on the retry queue.
+
+ If the message has retried more than the maximum number of times it is
+ moved to the dead letter queue.
+
+ Args:
+ message: The message to retry.
+ """
+ message.retry_count += 1
+ if message.retry_count > self.max_retries:
+ logger.warning(
+ "Message failed after %d retries and will be moved to the dead letter queue",
+ message.retry_count,
+ extra={"event_message": message},
+ )
+ await self.send_to_dead_letter_queue(message)
+ else:
+ await self._retry.put(message)
async def get(self) -> MemoryMessage:
+ """
+ Get a message from the subscription's queue.
+ """
if self._retry.qsize() > 0:
return await self._retry.get()
return await self._queue.get()
+ async def send_to_dead_letter_queue(self, message: MemoryMessage) -> None:
+ """
+ Send a message to the dead letter queue.
+
+ The dead letter queue is a directory of JSON files containing the
+ serialized messages.
+
+ Args:
+ message: The message to send to the dead letter queue.
+ """
+ self.dead_letter_queue_path.mkdir(parents=True, exist_ok=True)
+ try:
+ await anyio.Path(self.dead_letter_queue_path / uuid4().hex).write_bytes(
+ to_json(asdict(message))
+ )
+ except Exception as e:
+ logger.warning("Failed to write message to dead letter queue", exc_info=e)
+
class Topic:
_topics: Dict[str, "Topic"] = {}
@@ -93,7 +171,8 @@ def clear(self):
async def publish(self, message: MemoryMessage) -> None:
for subscription in self._subscriptions:
- await subscription.deliver(message)
+ # Ensure that each subscription gets its own copy of the message
+ await subscription.deliver(copy.deepcopy(message))
@asynccontextmanager
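
A sketch that drives a message past `max_retries` so it is written to the dead letter queue; the `Topic("demo")` construction and the temp directory are assumptions for illustration:

import asyncio
import tempfile
from prefect.server.utilities.messaging.memory import MemoryMessage, Subscription, Topic

async def main() -> None:
    dlq = tempfile.mkdtemp()
    sub = Subscription(Topic("demo"), max_retries=1, dead_letter_queue_path=dlq)
    message = MemoryMessage(data=b"{}", attributes={})
    await sub.retry(message)  # retry_count -> 1, goes back on the retry queue
    await sub.retry(message)  # retry_count -> 2 > max_retries, lands in the DLQ

asyncio.run(main())
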
diff --git a/src/prefect/server/utilities/schemas/bases.py b/src/prefect/server/utilities/schemas/bases.py
index da871d38c862..b50073128a59 100644
--- a/src/prefect/server/utilities/schemas/bases.py
+++ b/src/prefect/server/utilities/schemas/bases.py
@@ -109,9 +109,12 @@ def reset_fields(self: Self) -> Self:
Returns:
PrefectBaseModel: A new instance of the model with the reset fields.
"""
+ data = self.model_dump()
return self.model_copy(
update={
- field: self.model_fields[field].get_default(call_default_factory=True)
+ field: self.model_fields[field].get_default(
+ call_default_factory=True, validated_data=data
+ )
for field in self._reset_fields
}
)
diff --git a/src/prefect/settings/base.py b/src/prefect/settings/base.py
index e0088733f621..3fa42020bcc6 100644
--- a/src/prefect/settings/base.py
+++ b/src/prefect/settings/base.py
@@ -1,3 +1,4 @@
+import inspect
from functools import partial
from typing import Any, Dict, Tuple, Type
@@ -8,9 +9,19 @@
SerializerFunctionWrapHandler,
model_serializer,
)
-from pydantic_settings import BaseSettings, PydanticBaseSettingsSource
+from pydantic_settings import (
+ BaseSettings,
+ PydanticBaseSettingsSource,
+ SettingsConfigDict,
+)
-from prefect.settings.sources import EnvFilterSettingsSource, ProfileSettingsTomlLoader
+from prefect.settings.sources import (
+ EnvFilterSettingsSource,
+ FilteredDotEnvSettingsSource,
+ PrefectTomlConfigSettingsSource,
+ ProfileSettingsTomlLoader,
+ PyprojectTomlConfigSettingsSource,
+)
from prefect.utilities.collections import visit_collection
from prefect.utilities.pydantic import handle_secret_render
@@ -33,13 +44,14 @@ def settings_customise_sources(
See https://docs.pydantic.dev/latest/concepts/pydantic_settings/#customise-settings-sources
"""
env_filter = set()
- for field in settings_cls.model_fields.values():
+ for field_name, field in settings_cls.model_fields.items():
if field.validation_alias is not None and isinstance(
field.validation_alias, AliasChoices
):
for alias in field.validation_alias.choices:
if isinstance(alias, AliasPath) and len(alias.path) > 0:
env_filter.add(alias.path[0])
+ env_filter.add(field_name)
return (
init_settings,
EnvFilterSettingsSource(
@@ -52,8 +64,21 @@ def settings_customise_sources(
env_parse_enums=cls.model_config.get("env_parse_enums"),
env_filter=list(env_filter),
),
- dotenv_settings,
+ FilteredDotEnvSettingsSource(
+ settings_cls,
+ env_file=cls.model_config.get("env_file"),
+ env_file_encoding=cls.model_config.get("env_file_encoding"),
+ case_sensitive=cls.model_config.get("case_sensitive"),
+ env_prefix=cls.model_config.get("env_prefix"),
+ env_nested_delimiter=cls.model_config.get("env_nested_delimiter"),
+ env_ignore_empty=cls.model_config.get("env_ignore_empty"),
+ env_parse_none_str=cls.model_config.get("env_parse_none_str"),
+ env_parse_enums=cls.model_config.get("env_parse_enums"),
+ env_blacklist=list(env_filter),
+ ),
file_secret_settings,
+ PrefectTomlConfigSettingsSource(settings_cls),
+ PyprojectTomlConfigSettingsSource(settings_cls),
ProfileSettingsTomlLoader(settings_cls),
)
@@ -80,7 +105,7 @@ def to_environment_variables(
elif (value := env.get(key)) is not None:
env_variables[
f"{self.model_config.get('env_prefix')}{key.upper()}"
- ] = str(value)
+ ] = _to_environment_variable_value(value)
return env_variables
@model_serializer(
@@ -105,7 +130,7 @@ def ser_model(
if isinstance(child_settings := getattr(self, key), PrefectBaseSettings):
child_jsonable = child_settings.model_dump(
mode=info.mode,
- include=child_include,
+ include=child_include, # type: ignore
exclude=child_exclude,
exclude_unset=info.exclude_unset,
context=info.context,
@@ -129,3 +154,59 @@ def ser_model(
)
return jsonable_self
+
+
+class PrefectSettingsConfigDict(SettingsConfigDict, total=False):
+ """
+ Configuration for the behavior of Prefect settings models.
+ """
+
+ prefect_toml_table_header: tuple[str, ...]
+ """
+ Header of the TOML table within a prefect.toml file to use when filling variables.
+ This is supplied as a `tuple[str, ...]` instead of a `str` to accommodate headers
+ containing a `.`.
+
+ To use the root table, exclude this config setting or provide an empty tuple.
+ """
+
+
+def _add_environment_variables(
+ schema: Dict[str, Any], model: Type[PrefectBaseSettings]
+) -> None:
+ for property in schema["properties"]:
+ env_vars = []
+ schema["properties"][property]["supported_environment_variables"] = env_vars
+ field = model.model_fields[property]
+ if inspect.isclass(field.annotation) and issubclass(
+ field.annotation, PrefectBaseSettings
+ ):
+ continue
+ elif field.validation_alias:
+ if isinstance(field.validation_alias, AliasChoices):
+ for alias in field.validation_alias.choices:
+ if isinstance(alias, str):
+ env_vars.append(alias.upper())
+ else:
+ env_vars.append(f"{model.model_config.get('env_prefix')}{property.upper()}")
+
+
+def _build_settings_config(
+ path: Tuple[str, ...] = tuple(),
+) -> PrefectSettingsConfigDict:
+ env_prefix = f"PREFECT_{'_'.join(path).upper()}_" if path else "PREFECT_"
+ return PrefectSettingsConfigDict(
+ env_prefix=env_prefix,
+ env_file=".env",
+ extra="ignore",
+ toml_file="prefect.toml",
+ prefect_toml_table_header=path,
+ pyproject_toml_table_header=("tool", "prefect", *path),
+ json_schema_extra=_add_environment_variables,
+ )
+
+
+def _to_environment_variable_value(value: Any) -> str:
+ if isinstance(value, (list, set, tuple)):
+ return ",".join(str(v) for v in value)
+ return str(value)
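
What the helper derives for a nested settings path, per the code above:

from prefect.settings.base import _build_settings_config

config = _build_settings_config(("client", "metrics"))
assert config["env_prefix"] == "PREFECT_CLIENT_METRICS_"
assert config["prefect_toml_table_header"] == ("client", "metrics")
assert config["pyproject_toml_table_header"] == ("tool", "prefect", "client", "metrics")
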
diff --git a/src/prefect/settings/legacy.py b/src/prefect/settings/legacy.py
index fb6f62e55cca..17f76e3f1404 100644
--- a/src/prefect/settings/legacy.py
+++ b/src/prefect/settings/legacy.py
@@ -64,7 +64,7 @@ def value_from(self: Self, settings: "Settings") -> Any:
for key in path:
current_value = getattr(current_value, key, None)
if isinstance(current_value, _SECRET_TYPES):
- return current_value.get_secret_value()
+ return current_value.get_secret_value() # type: ignore
return current_value
def __bool__(self) -> bool:
diff --git a/src/prefect/settings/models/api.py b/src/prefect/settings/models/api.py
index 02c278edd19b..2ece448009a4 100644
--- a/src/prefect/settings/models/api.py
+++ b/src/prefect/settings/models/api.py
@@ -2,9 +2,11 @@
from typing import Optional
from pydantic import Field, SecretStr
-from pydantic_settings import SettingsConfigDict
-from prefect.settings.base import PrefectBaseSettings
+from prefect.settings.base import (
+ PrefectBaseSettings,
+ _build_settings_config,
+)
class APISettings(PrefectBaseSettings):
@@ -12,9 +14,7 @@ class APISettings(PrefectBaseSettings):
Settings for interacting with the Prefect API
"""
- model_config = SettingsConfigDict(
- env_prefix="PREFECT_API_", env_file=".env", extra="ignore"
- )
+ model_config = _build_settings_config(("api",))
url: Optional[str] = Field(
default=None,
description="The URL of the Prefect API. If not set, the client will attempt to infer it.",
@@ -25,7 +25,7 @@ class APISettings(PrefectBaseSettings):
)
tls_insecure_skip_verify: bool = Field(
default=False,
- description="If `True`, disables SSL checking to allow insecure requests. This is recommended only during development, e.g. when using self-signed certificates.",
+ description="If `True`, disables SSL checking to allow insecure requests. Setting to False is recommended only during development. For example, when using self-signed certificates.",
)
ssl_cert_file: Optional[str] = Field(
default=os.environ.get("SSL_CERT_FILE"),
diff --git a/src/prefect/settings/models/cli.py b/src/prefect/settings/models/cli.py
index 3072674f444e..47afad2ed509 100644
--- a/src/prefect/settings/models/cli.py
+++ b/src/prefect/settings/models/cli.py
@@ -1,9 +1,11 @@
from typing import Optional
from pydantic import Field
-from pydantic_settings import SettingsConfigDict
-from prefect.settings.base import PrefectBaseSettings
+from prefect.settings.base import (
+ PrefectBaseSettings,
+ _build_settings_config,
+)
class CLISettings(PrefectBaseSettings):
@@ -11,9 +13,7 @@ class CLISettings(PrefectBaseSettings):
Settings for controlling CLI behavior
"""
- model_config = SettingsConfigDict(
- env_prefix="PREFECT_CLI_", env_file=".env", extra="ignore"
- )
+ model_config = _build_settings_config(("cli",))
colors: bool = Field(
default=True,
diff --git a/src/prefect/settings/models/client.py b/src/prefect/settings/models/client.py
index 88289e18b6ac..e8f596f87eb5 100644
--- a/src/prefect/settings/models/client.py
+++ b/src/prefect/settings/models/client.py
@@ -1,7 +1,9 @@
from pydantic import AliasChoices, AliasPath, Field
-from pydantic_settings import SettingsConfigDict
-from prefect.settings.base import PrefectBaseSettings
+from prefect.settings.base import (
+ PrefectBaseSettings,
+ _build_settings_config,
+)
from prefect.types import ClientRetryExtraCodes
@@ -10,9 +12,7 @@ class ClientMetricsSettings(PrefectBaseSettings):
Settings for controlling metrics reporting from the client
"""
- model_config = SettingsConfigDict(
- env_prefix="PREFECT_CLIENT_METRICS_", env_file=".env", extra="ignore"
- )
+ model_config = _build_settings_config(("client", "metrics"))
enabled: bool = Field(
default=False,
@@ -37,9 +37,7 @@ class ClientSettings(PrefectBaseSettings):
Settings for controlling API client behavior
"""
- model_config = SettingsConfigDict(
- env_prefix="PREFECT_CLIENT_", env_file=".env", extra="ignore"
- )
+ model_config = _build_settings_config(("client",))
max_retries: int = Field(
default=5,
diff --git a/src/prefect/settings/models/cloud.py b/src/prefect/settings/models/cloud.py
index 40ebad3caa60..cdcc64796824 100644
--- a/src/prefect/settings/models/cloud.py
+++ b/src/prefect/settings/models/cloud.py
@@ -2,10 +2,12 @@
from typing import Optional
from pydantic import Field, model_validator
-from pydantic_settings import SettingsConfigDict
from typing_extensions import Self
-from prefect.settings.base import PrefectBaseSettings
+from prefect.settings.base import (
+ PrefectBaseSettings,
+ _build_settings_config,
+)
def default_cloud_ui_url(settings: "CloudSettings") -> Optional[str]:
@@ -30,9 +32,7 @@ class CloudSettings(PrefectBaseSettings):
Settings for interacting with Prefect Cloud
"""
- model_config = SettingsConfigDict(
- env_prefix="PREFECT_CLOUD_", env_file=".env", extra="ignore"
- )
+ model_config = _build_settings_config(("cloud",))
api_url: str = Field(
default="https://api.prefect.cloud/api",
diff --git a/src/prefect/settings/models/deployments.py b/src/prefect/settings/models/deployments.py
index d6f3fda1aace..d0d405effc5f 100644
--- a/src/prefect/settings/models/deployments.py
+++ b/src/prefect/settings/models/deployments.py
@@ -1,9 +1,8 @@
from typing import Optional
from pydantic import AliasChoices, AliasPath, Field
-from pydantic_settings import SettingsConfigDict
-from prefect.settings.base import PrefectBaseSettings
+from prefect.settings.base import PrefectBaseSettings, _build_settings_config
class DeploymentsSettings(PrefectBaseSettings):
@@ -11,9 +10,7 @@ class DeploymentsSettings(PrefectBaseSettings):
Settings for configuring deployments defaults
"""
- model_config = SettingsConfigDict(
- env_prefix="PREFECT_DEPLOYMENTS_", env_file=".env", extra="ignore"
- )
+ model_config = _build_settings_config(("deployments",))
default_work_pool_name: Optional[str] = Field(
default=None,
diff --git a/src/prefect/settings/models/experiments.py b/src/prefect/settings/models/experiments.py
new file mode 100644
index 000000000000..218128c3dcf1
--- /dev/null
+++ b/src/prefect/settings/models/experiments.py
@@ -0,0 +1,24 @@
+from pydantic import AliasChoices, AliasPath, Field
+
+from prefect.settings.base import PrefectBaseSettings, _build_settings_config
+
+
+class ExperimentsSettings(PrefectBaseSettings):
+ """
+ Settings for configuring experimental features
+ """
+
+ model_config = _build_settings_config(("experiments",))
+
+ warn: bool = Field(
+ default=True,
+ description="If `True`, warn on usage of experimental features.",
+ validation_alias=AliasChoices(
+ AliasPath("warn"), "prefect_experiments_warn", "prefect_experimental_warn"
+ ),
+ )
+
+ telemetry_enabled: bool = Field(
+ default=False,
+ description="Enables sending telemetry to Prefect Cloud.",
+ )
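
As a quick illustration of how the aliases above resolve (values hypothetical), both the new canonical name and the legacy spelling reach the same field:

    import os

    os.environ["PREFECT_EXPERIMENTS_WARN"] = "false"     # canonical name
    # os.environ["PREFECT_EXPERIMENTAL_WARN"] = "false"  # legacy alias, still honored

    from prefect.settings.models.experiments import ExperimentsSettings
    assert ExperimentsSettings().warn is False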
diff --git a/src/prefect/settings/models/flows.py b/src/prefect/settings/models/flows.py
index 8a20485d28c5..52c9be53d50e 100644
--- a/src/prefect/settings/models/flows.py
+++ b/src/prefect/settings/models/flows.py
@@ -1,9 +1,8 @@
from typing import Union
from pydantic import AliasChoices, AliasPath, Field
-from pydantic_settings import SettingsConfigDict
-from prefect.settings.base import PrefectBaseSettings
+from prefect.settings.base import PrefectBaseSettings, _build_settings_config
class FlowsSettings(PrefectBaseSettings):
@@ -11,9 +10,7 @@ class FlowsSettings(PrefectBaseSettings):
Settings for controlling flow behavior
"""
- model_config = SettingsConfigDict(
- env_prefix="PREFECT_FLOWS_", env_file=".env", extra="ignore"
- )
+ model_config = _build_settings_config(("flows",))
default_retries: int = Field(
default=0,
diff --git a/src/prefect/settings/models/internal.py b/src/prefect/settings/models/internal.py
index 33041e537942..ed8287535572 100644
--- a/src/prefect/settings/models/internal.py
+++ b/src/prefect/settings/models/internal.py
@@ -1,14 +1,11 @@
from pydantic import AliasChoices, AliasPath, Field
-from pydantic_settings import SettingsConfigDict
-from prefect.settings.base import PrefectBaseSettings
+from prefect.settings.base import PrefectBaseSettings, _build_settings_config
from prefect.types import LogLevel
class InternalSettings(PrefectBaseSettings):
- model_config = SettingsConfigDict(
- env_prefix="PREFECT_INTERNAL_", env_file=".env", extra="ignore"
- )
+ model_config = _build_settings_config(("internal",))
logging_level: LogLevel = Field(
default="ERROR",
diff --git a/src/prefect/settings/models/logging.py b/src/prefect/settings/models/logging.py
index 634033c9f995..c653bae43039 100644
--- a/src/prefect/settings/models/logging.py
+++ b/src/prefect/settings/models/logging.py
@@ -1,12 +1,18 @@
+from functools import partial
from pathlib import Path
from typing import Annotated, Literal, Optional, Union
-from pydantic import AfterValidator, AliasChoices, AliasPath, Field, model_validator
-from pydantic_settings import SettingsConfigDict
+from pydantic import (
+ AliasChoices,
+ AliasPath,
+ BeforeValidator,
+ Field,
+ model_validator,
+)
from typing_extensions import Self
-from prefect.settings.base import PrefectBaseSettings
-from prefect.types import LogLevel
+from prefect.settings.base import PrefectBaseSettings, _build_settings_config
+from prefect.types import LogLevel, validate_set_T_from_delim_string
def max_log_size_smaller_than_batch_size(values):
@@ -26,9 +32,7 @@ class LoggingToAPISettings(PrefectBaseSettings):
Settings for controlling logging to the API
"""
- model_config = SettingsConfigDict(
- env_prefix="PREFECT_LOGGING_TO_API_", env_file=".env", extra="ignore"
- )
+ model_config = _build_settings_config(("logging", "to_api"))
enabled: bool = Field(
default=True,
@@ -81,9 +85,7 @@ class LoggingSettings(PrefectBaseSettings):
Settings for controlling logging behavior
"""
- model_config = SettingsConfigDict(
- env_prefix="PREFECT_LOGGING_", env_file=".env", extra="ignore"
- )
+ model_config = _build_settings_config(("logging",))
level: LogLevel = Field(
default="INFO",
@@ -102,7 +104,7 @@ class LoggingSettings(PrefectBaseSettings):
extra_loggers: Annotated[
Union[str, list[str], None],
- AfterValidator(lambda v: [n.strip() for n in v.split(",")] if v else []),
+ BeforeValidator(partial(validate_set_T_from_delim_string, type_=str)),
] = Field(
default=None,
description="Additional loggers to attach to Prefect logging at runtime.",
diff --git a/src/prefect/settings/models/results.py b/src/prefect/settings/models/results.py
index 9fcefaa1a86b..81844c9f3336 100644
--- a/src/prefect/settings/models/results.py
+++ b/src/prefect/settings/models/results.py
@@ -2,9 +2,8 @@
from typing import Optional
from pydantic import AliasChoices, AliasPath, Field
-from pydantic_settings import SettingsConfigDict
-from prefect.settings.base import PrefectBaseSettings
+from prefect.settings.base import PrefectBaseSettings, _build_settings_config
class ResultsSettings(PrefectBaseSettings):
@@ -12,9 +11,7 @@ class ResultsSettings(PrefectBaseSettings):
Settings for controlling result storage behavior
"""
- model_config = SettingsConfigDict(
- env_prefix="PREFECT_RESULTS_", env_file=".env", extra="ignore"
- )
+ model_config = _build_settings_config(("results",))
default_serializer: str = Field(
default="pickle",
diff --git a/src/prefect/settings/models/root.py b/src/prefect/settings/models/root.py
index 16524c45385e..75d5d7caa11d 100644
--- a/src/prefect/settings/models/root.py
+++ b/src/prefect/settings/models/root.py
@@ -11,10 +11,9 @@
from urllib.parse import urlparse
from pydantic import BeforeValidator, Field, SecretStr, model_validator
-from pydantic_settings import SettingsConfigDict
from typing_extensions import Self
-from prefect.settings.base import PrefectBaseSettings
+from prefect.settings.base import PrefectBaseSettings, _build_settings_config
from prefect.settings.models.tasks import TasksSettings
from prefect.settings.models.testing import TestingSettings
from prefect.settings.models.worker import WorkerSettings
@@ -25,6 +24,7 @@
from .client import ClientSettings
from .cloud import CloudSettings
from .deployments import DeploymentsSettings
+from .experiments import ExperimentsSettings
from .flows import FlowsSettings
from .internal import InternalSettings
from .logging import LoggingSettings
@@ -43,12 +43,7 @@ class Settings(PrefectBaseSettings):
See https://docs.pydantic.dev/latest/concepts/pydantic_settings
"""
- model_config = SettingsConfigDict(
- env_file=".env",
- env_prefix="PREFECT_",
- env_nested_delimiter=None,
- extra="ignore",
- )
+ model_config = _build_settings_config()
home: Annotated[Path, BeforeValidator(lambda x: Path(x).expanduser())] = Field(
default=Path("~") / ".prefect",
@@ -77,7 +72,7 @@ class Settings(PrefectBaseSettings):
client: ClientSettings = Field(
default_factory=ClientSettings,
- description="Settings for for controlling API client behavior",
+ description="Settings for controlling API client behavior",
)
cloud: CloudSettings = Field(
@@ -90,6 +85,11 @@ class Settings(PrefectBaseSettings):
description="Settings for configuring deployments defaults",
)
+ experiments: ExperimentsSettings = Field(
+ default_factory=ExperimentsSettings,
+ description="Settings for controlling experimental features",
+ )
+
flows: FlowsSettings = Field(
default_factory=FlowsSettings,
description="Settings for controlling flow behavior",
@@ -149,25 +149,6 @@ class Settings(PrefectBaseSettings):
""",
)
- experimental_warn: bool = Field(
- default=True,
- description="If `True`, warn on usage of experimental features.",
- )
-
- # this setting needs to be removed
- async_fetch_state_result: bool = Field(
- default=False,
- description="""
- Determines whether `State.result()` fetches results automatically or not.
- In Prefect 2.6.0, the `State.result()` method was updated to be async
- to facilitate automatic retrieval of results from storage which means when
- writing async code you must `await` the call. For backwards compatibility,
- the result is not retrieved by default for async users. You may opt into this
- per call by passing `fetch=True` or toggle this setting to change the behavior
- globally.
- """,
- )
-
###########################################################################
# allow deprecated access to PREFECT_SOME_SETTING_NAME
@@ -290,9 +271,32 @@ def copy_with_update(
Returns:
A new Settings object.
"""
+ # To restore defaults, we need to resolve the setting path and then
+ # set the default value on the new settings object. When restoring
+ # defaults, all settings sources will be ignored.
restore_defaults_obj = {}
for r in restore_defaults or []:
- set_in_dict(restore_defaults_obj, r.accessor, True)
+ path = r.accessor.split(".")
+ model = self
+ for key in path[:-1]:
+ model = model.model_fields[key].annotation
+ assert model is not None, f"Invalid setting path: {r.accessor}"
+
+ model_field = model.model_fields[path[-1]]
+ assert model_field is not None, f"Invalid setting path: {r.accessor}"
+ if hasattr(model_field, "default"):
+ default = model_field.default
+ elif (
+ hasattr(model_field, "default_factory") and model_field.default_factory
+ ):
+ default = model_field.default_factory()
+ else:
+ raise ValueError(f"No default value for setting: {r.accessor}")
+ set_in_dict(
+ restore_defaults_obj,
+ r.accessor,
+ default,
+ )
updates = updates or {}
set_defaults = set_defaults or {}
@@ -307,7 +311,8 @@ def copy_with_update(
new_settings = self.__class__.model_validate(
deep_merge_dicts(
set_defaults_obj,
- self.model_dump(exclude_unset=True, exclude=restore_defaults_obj),
+ self.model_dump(exclude_unset=True),
+ restore_defaults_obj,
updates_obj,
)
)
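
With this change, restoring a default resolves the field's declared `default`/`default_factory` instead of writing a placeholder into the merge dict. A sketch of the intended round trip, assuming the usual `Setting` objects whose `.accessor` is a dotted path such as "api.url":

    from prefect.settings import PREFECT_API_URL, get_current_settings

    updated = get_current_settings().copy_with_update(
        updates={PREFECT_API_URL: "http://localhost:4200/api"}
    )
    restored = updated.copy_with_update(restore_defaults={PREFECT_API_URL})
    assert restored.api.url is None  # the default declared on APISettings.url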
diff --git a/src/prefect/settings/models/runner.py b/src/prefect/settings/models/runner.py
index 83431ded5d24..b505d0d8967d 100644
--- a/src/prefect/settings/models/runner.py
+++ b/src/prefect/settings/models/runner.py
@@ -1,7 +1,6 @@
from pydantic import Field
-from pydantic_settings import SettingsConfigDict
-from prefect.settings.base import PrefectBaseSettings
+from prefect.settings.base import PrefectBaseSettings, _build_settings_config
from prefect.types import LogLevel
@@ -10,9 +9,7 @@ class RunnerServerSettings(PrefectBaseSettings):
Settings for controlling runner server behavior
"""
- model_config = SettingsConfigDict(
- env_prefix="PREFECT_RUNNER_SERVER_", env_file=".env", extra="ignore"
- )
+ model_config = _build_settings_config(("runner", "server"))
enable: bool = Field(
default=False,
@@ -45,9 +42,7 @@ class RunnerSettings(PrefectBaseSettings):
Settings for controlling runner behavior
"""
- model_config = SettingsConfigDict(
- env_prefix="PREFECT_RUNNER_", env_file=".env", extra="ignore"
- )
+ model_config = _build_settings_config(("runner",))
process_limit: int = Field(
default=5,
diff --git a/src/prefect/settings/models/server/api.py b/src/prefect/settings/models/server/api.py
index 5117610c117d..424a3c9d72b9 100644
--- a/src/prefect/settings/models/server/api.py
+++ b/src/prefect/settings/models/server/api.py
@@ -1,9 +1,8 @@
from datetime import timedelta
from pydantic import AliasChoices, AliasPath, Field
-from pydantic_settings import SettingsConfigDict
-from prefect.settings.base import PrefectBaseSettings
+from prefect.settings.base import PrefectBaseSettings, _build_settings_config
class ServerAPISettings(PrefectBaseSettings):
@@ -11,9 +10,7 @@ class ServerAPISettings(PrefectBaseSettings):
Settings for controlling API server behavior
"""
- model_config = SettingsConfigDict(
- env_prefix="PREFECT_SERVER_API_", env_file=".env", extra="ignore"
- )
+ model_config = _build_settings_config(("server", "api"))
host: str = Field(
default="127.0.0.1",
diff --git a/src/prefect/settings/models/server/database.py b/src/prefect/settings/models/server/database.py
index 175a65a6a92f..ae3f7614a0e4 100644
--- a/src/prefect/settings/models/server/database.py
+++ b/src/prefect/settings/models/server/database.py
@@ -3,10 +3,9 @@
from urllib.parse import quote_plus
from pydantic import AliasChoices, AliasPath, Field, SecretStr, model_validator
-from pydantic_settings import SettingsConfigDict
from typing_extensions import Literal, Self
-from prefect.settings.base import PrefectBaseSettings
+from prefect.settings.base import PrefectBaseSettings, _build_settings_config
class ServerDatabaseSettings(PrefectBaseSettings):
@@ -14,11 +13,7 @@ class ServerDatabaseSettings(PrefectBaseSettings):
Settings for controlling server database behavior
"""
- model_config = SettingsConfigDict(
- env_prefix="PREFECT_SERVER_DATABASE_",
- env_file=".env",
- extra="ignore",
- )
+ model_config = _build_settings_config(("server", "database"))
connection_url: Optional[SecretStr] = Field(
default=None,
diff --git a/src/prefect/settings/models/server/deployments.py b/src/prefect/settings/models/server/deployments.py
index efcd3a1cc4b2..0a7c872560e3 100644
--- a/src/prefect/settings/models/server/deployments.py
+++ b/src/prefect/settings/models/server/deployments.py
@@ -1,13 +1,10 @@
from pydantic import AliasChoices, AliasPath, Field
-from pydantic_settings import SettingsConfigDict
-from prefect.settings.base import PrefectBaseSettings
+from prefect.settings.base import PrefectBaseSettings, _build_settings_config
class ServerDeploymentsSettings(PrefectBaseSettings):
- model_config = SettingsConfigDict(
- env_prefix="PREFECT_SERVER_DEPLOYMENTS_", env_file=".env", extra="ignore"
- )
+ model_config = _build_settings_config(("server", "deployments"))
concurrency_slot_wait_seconds: float = Field(
default=30.0,
diff --git a/src/prefect/settings/models/server/ephemeral.py b/src/prefect/settings/models/server/ephemeral.py
index 1e33efab00a8..02997789771b 100644
--- a/src/prefect/settings/models/server/ephemeral.py
+++ b/src/prefect/settings/models/server/ephemeral.py
@@ -1,7 +1,6 @@
from pydantic import AliasChoices, AliasPath, Field
-from pydantic_settings import SettingsConfigDict
-from prefect.settings.base import PrefectBaseSettings
+from prefect.settings.base import PrefectBaseSettings, _build_settings_config
class ServerEphemeralSettings(PrefectBaseSettings):
@@ -9,9 +8,7 @@ class ServerEphemeralSettings(PrefectBaseSettings):
Settings for controlling ephemeral server behavior
"""
- model_config = SettingsConfigDict(
- env_prefix="PREFECT_SERVER_EPHEMERAL_", env_file=".env", extra="ignore"
- )
+ model_config = _build_settings_config(("server", "ephemeral"))
enabled: bool = Field(
default=False,
diff --git a/src/prefect/settings/models/server/events.py b/src/prefect/settings/models/server/events.py
index 3f3c3056ecc9..321ff8321976 100644
--- a/src/prefect/settings/models/server/events.py
+++ b/src/prefect/settings/models/server/events.py
@@ -1,9 +1,8 @@
from datetime import timedelta
from pydantic import AliasChoices, AliasPath, Field
-from pydantic_settings import SettingsConfigDict
-from prefect.settings.base import PrefectBaseSettings
+from prefect.settings.base import PrefectBaseSettings, _build_settings_config
class ServerEventsSettings(PrefectBaseSettings):
@@ -11,9 +10,7 @@ class ServerEventsSettings(PrefectBaseSettings):
Settings for controlling behavior of the events subsystem
"""
- model_config = SettingsConfigDict(
- env_prefix="PREFECT_SERVER_EVENTS_", env_file=".env", extra="ignore"
- )
+ model_config = _build_settings_config(("server", "events"))
###########################################################################
# Events settings
@@ -131,7 +128,7 @@ class ServerEventsSettings(PrefectBaseSettings):
messaging_cache: str = Field(
default="prefect.server.utilities.messaging.memory",
- description="Which cache implementation to use for the events system. Should point to a module that exports a Cache class.",
+ description="Which cache implementation to use for the events system. Should point to a module that exports a Cache class.",
validation_alias=AliasChoices(
AliasPath("messaging_cache"),
"prefect_server_events_messaging_cache",
diff --git a/src/prefect/settings/models/server/flow_run_graph.py b/src/prefect/settings/models/server/flow_run_graph.py
index f7a54745b6d3..5c7a38905066 100644
--- a/src/prefect/settings/models/server/flow_run_graph.py
+++ b/src/prefect/settings/models/server/flow_run_graph.py
@@ -1,7 +1,6 @@
from pydantic import AliasChoices, AliasPath, Field
-from pydantic_settings import SettingsConfigDict
-from prefect.settings.base import PrefectBaseSettings
+from prefect.settings.base import PrefectBaseSettings, _build_settings_config
class ServerFlowRunGraphSettings(PrefectBaseSettings):
@@ -9,9 +8,7 @@ class ServerFlowRunGraphSettings(PrefectBaseSettings):
Settings for controlling behavior of the flow run graph
"""
- model_config = SettingsConfigDict(
- env_prefix="PREFECT_SERVER_FLOW_RUN_GRAPH_", env_file=".env", extra="ignore"
- )
+ model_config = _build_settings_config(("server", "flow_run_graph"))
max_nodes: int = Field(
default=10000,
diff --git a/src/prefect/settings/models/server/root.py b/src/prefect/settings/models/server/root.py
index 244d43d4e515..5e61bdddfd0d 100644
--- a/src/prefect/settings/models/server/root.py
+++ b/src/prefect/settings/models/server/root.py
@@ -2,9 +2,8 @@
from typing import Optional
from pydantic import AliasChoices, AliasPath, Field
-from pydantic_settings import SettingsConfigDict
-from prefect.settings.base import PrefectBaseSettings
+from prefect.settings.base import PrefectBaseSettings, _build_settings_config
from prefect.types import LogLevel
from .api import ServerAPISettings
@@ -23,9 +22,7 @@ class ServerSettings(PrefectBaseSettings):
Settings for controlling server behavior
"""
- model_config = SettingsConfigDict(
- env_prefix="PREFECT_SERVER_", env_file=".env", extra="ignore"
- )
+ model_config = _build_settings_config(("server",))
logging_level: LogLevel = Field(
default="WARNING",
diff --git a/src/prefect/settings/models/server/services.py b/src/prefect/settings/models/server/services.py
index 27c6e84693f3..efaed2a40528 100644
--- a/src/prefect/settings/models/server/services.py
+++ b/src/prefect/settings/models/server/services.py
@@ -1,9 +1,8 @@
from datetime import timedelta
from pydantic import AliasChoices, AliasPath, Field
-from pydantic_settings import SettingsConfigDict
-from prefect.settings.base import PrefectBaseSettings
+from prefect.settings.base import PrefectBaseSettings, _build_settings_config
class ServerServicesCancellationCleanupSettings(PrefectBaseSettings):
@@ -11,10 +10,8 @@ class ServerServicesCancellationCleanupSettings(PrefectBaseSettings):
Settings for controlling the cancellation cleanup service
"""
- model_config = SettingsConfigDict(
- env_prefix="PREFECT_SERVER_SERVICES_CANCELLATION_CLEANUP_",
- env_file=".env",
- extra="ignore",
+ model_config = _build_settings_config(
+ ("server", "services", "cancellation_cleanup")
)
enabled: bool = Field(
@@ -43,11 +40,7 @@ class ServerServicesEventPersisterSettings(PrefectBaseSettings):
Settings for controlling the event persister service
"""
- model_config = SettingsConfigDict(
- env_prefix="PREFECT_SERVER_SERVICES_EVENT_PERSISTER_",
- env_file=".env",
- extra="ignore",
- )
+ model_config = _build_settings_config(("server", "services", "event_persister"))
enabled: bool = Field(
default=True,
@@ -87,10 +80,8 @@ class ServerServicesFlowRunNotificationsSettings(PrefectBaseSettings):
Settings for controlling the flow run notifications service
"""
- model_config = SettingsConfigDict(
- env_prefix="PREFECT_SERVER_SERVICES_FLOW_RUN_NOTIFICATIONS_",
- env_file=".env",
- extra="ignore",
+ model_config = _build_settings_config(
+ ("server", "services", "flow_run_notifications")
)
enabled: bool = Field(
@@ -109,11 +100,7 @@ class ServerServicesForemanSettings(PrefectBaseSettings):
Settings for controlling the foreman service
"""
- model_config = SettingsConfigDict(
- env_prefix="PREFECT_SERVER_SERVICES_FOREMAN_",
- env_file=".env",
- extra="ignore",
- )
+ model_config = _build_settings_config(("server", "services", "foreman"))
enabled: bool = Field(
default=True,
@@ -192,9 +179,7 @@ class ServerServicesLateRunsSettings(PrefectBaseSettings):
Settings for controlling the late runs service
"""
- model_config = SettingsConfigDict(
- env_prefix="PREFECT_SERVER_SERVICES_LATE_RUNS_", env_file=".env", extra="ignore"
- )
+ model_config = _build_settings_config(("server", "services", "late_runs"))
enabled: bool = Field(
default=True,
@@ -236,9 +221,7 @@ class ServerServicesSchedulerSettings(PrefectBaseSettings):
Settings for controlling the scheduler service
"""
- model_config = SettingsConfigDict(
- env_prefix="PREFECT_SERVER_SERVICES_SCHEDULER_", env_file=".env", extra="ignore"
- )
+ model_config = _build_settings_config(("server", "services", "scheduler"))
enabled: bool = Field(
default=True,
@@ -361,11 +344,7 @@ class ServerServicesPauseExpirationsSettings(PrefectBaseSettings):
Settings for controlling the pause expiration service
"""
- model_config = SettingsConfigDict(
- env_prefix="PREFECT_SERVER_SERVICES_PAUSE_EXPIRATIONS_",
- env_file=".env",
- extra="ignore",
- )
+ model_config = _build_settings_config(("server", "services", "pause_expirations"))
enabled: bool = Field(
default=True,
@@ -399,11 +378,7 @@ class ServerServicesTaskRunRecorderSettings(PrefectBaseSettings):
Settings for controlling the task run recorder service
"""
- model_config = SettingsConfigDict(
- env_prefix="PREFECT_SERVER_SERVICES_TASK_RUN_RECORDER_",
- env_file=".env",
- extra="ignore",
- )
+ model_config = _build_settings_config(("server", "services", "task_run_recorder"))
enabled: bool = Field(
default=True,
@@ -421,11 +396,7 @@ class ServerServicesTriggersSettings(PrefectBaseSettings):
Settings for controlling the triggers service
"""
- model_config = SettingsConfigDict(
- env_prefix="PREFECT_SERVER_SERVICES_TRIGGERS_",
- env_file=".env",
- extra="ignore",
- )
+ model_config = _build_settings_config(("server", "services", "triggers"))
enabled: bool = Field(
default=True,
@@ -443,9 +414,7 @@ class ServerServicesSettings(PrefectBaseSettings):
Settings for controlling server services
"""
- model_config = SettingsConfigDict(
- env_prefix="PREFECT_SERVER_SERVICES_", env_file=".env", extra="ignore"
- )
+ model_config = _build_settings_config(("server", "services"))
cancellation_cleanup: ServerServicesCancellationCleanupSettings = Field(
default_factory=ServerServicesCancellationCleanupSettings,
diff --git a/src/prefect/settings/models/server/tasks.py b/src/prefect/settings/models/server/tasks.py
index 2b5821af7d86..4327ef492b5c 100644
--- a/src/prefect/settings/models/server/tasks.py
+++ b/src/prefect/settings/models/server/tasks.py
@@ -1,9 +1,8 @@
from datetime import timedelta
from pydantic import AliasChoices, AliasPath, Field
-from pydantic_settings import SettingsConfigDict
-from prefect.settings.base import PrefectBaseSettings
+from prefect.settings.base import PrefectBaseSettings, _build_settings_config
class ServerTasksSchedulingSettings(PrefectBaseSettings):
@@ -11,11 +10,7 @@ class ServerTasksSchedulingSettings(PrefectBaseSettings):
Settings for controlling server-side behavior related to task scheduling
"""
- model_config = SettingsConfigDict(
- env_file=".env",
- env_prefix="PREFECT_SERVER_TASKS_SCHEDULING_",
- extra="ignore",
- )
+ model_config = _build_settings_config(("server", "tasks", "scheduling"))
max_scheduled_queue_size: int = Field(
default=1000,
@@ -53,11 +48,7 @@ class ServerTasksSettings(PrefectBaseSettings):
Settings for controlling server-side behavior related to tasks
"""
- model_config = SettingsConfigDict(
- env_file=".env",
- env_prefix="PREFECT_SERVER_TASKS_",
- extra="ignore",
- )
+ model_config = _build_settings_config(("server", "tasks"))
tag_concurrency_slot_wait_seconds: float = Field(
default=30,
diff --git a/src/prefect/settings/models/server/ui.py b/src/prefect/settings/models/server/ui.py
index 7cb6de72dd9f..6e7d1d716137 100644
--- a/src/prefect/settings/models/server/ui.py
+++ b/src/prefect/settings/models/server/ui.py
@@ -1,15 +1,12 @@
from typing import Optional
from pydantic import AliasChoices, AliasPath, Field
-from pydantic_settings import SettingsConfigDict
-from prefect.settings.base import PrefectBaseSettings
+from prefect.settings.base import PrefectBaseSettings, _build_settings_config
class ServerUISettings(PrefectBaseSettings):
- model_config = SettingsConfigDict(
- env_prefix="PREFECT_SERVER_UI_", env_file=".env", extra="ignore"
- )
+ model_config = _build_settings_config(("server", "ui"))
enabled: bool = Field(
default=True,
diff --git a/src/prefect/settings/models/tasks.py b/src/prefect/settings/models/tasks.py
index 1fb60c263e08..86b8e20e08ba 100644
--- a/src/prefect/settings/models/tasks.py
+++ b/src/prefect/settings/models/tasks.py
@@ -1,15 +1,12 @@
from typing import Optional, Union
from pydantic import AliasChoices, AliasPath, Field
-from pydantic_settings import SettingsConfigDict
-from prefect.settings.base import PrefectBaseSettings
+from prefect.settings.base import PrefectBaseSettings, _build_settings_config
class TasksRunnerSettings(PrefectBaseSettings):
- model_config = SettingsConfigDict(
- env_prefix="PREFECT_TASKS_RUNNER_", env_file=".env", extra="ignore"
- )
+ model_config = _build_settings_config(("tasks", "runner"))
thread_pool_max_workers: Optional[int] = Field(
default=None,
@@ -24,9 +21,7 @@ class TasksRunnerSettings(PrefectBaseSettings):
class TasksSchedulingSettings(PrefectBaseSettings):
- model_config = SettingsConfigDict(
- env_prefix="PREFECT_TASKS_SCHEDULING_", env_file=".env", extra="ignore"
- )
+ model_config = _build_settings_config(("tasks", "scheduling"))
default_storage_block: Optional[str] = Field(
default=None,
@@ -50,9 +45,7 @@ class TasksSchedulingSettings(PrefectBaseSettings):
class TasksSettings(PrefectBaseSettings):
- model_config = SettingsConfigDict(
- env_prefix="PREFECT_TASKS_", env_file=".env", extra="ignore"
- )
+ model_config = _build_settings_config(("tasks",))
refresh_cache: bool = Field(
default=False,
@@ -80,6 +73,12 @@ class TasksSettings(PrefectBaseSettings):
),
)
+ default_persist_result: Optional[bool] = Field(
+ default=None,
+ description="If `True`, results will be persisted by default for all tasks. Set to `False` to disable persistence by default. "
+ "Note that setting to `False` will override the behavior set by a parent flow or task.",
+ )
+
runner: TasksRunnerSettings = Field(
default_factory=TasksRunnerSettings,
description="Settings for controlling task runner behavior",
diff --git a/src/prefect/settings/models/testing.py b/src/prefect/settings/models/testing.py
index 2c34fc464428..c8946123fc98 100644
--- a/src/prefect/settings/models/testing.py
+++ b/src/prefect/settings/models/testing.py
@@ -1,15 +1,12 @@
from typing import Any, Optional
from pydantic import AliasChoices, AliasPath, Field
-from pydantic_settings import SettingsConfigDict
-from prefect.settings.base import PrefectBaseSettings
+from prefect.settings.base import PrefectBaseSettings, _build_settings_config
class TestingSettings(PrefectBaseSettings):
- model_config = SettingsConfigDict(
- env_prefix="PREFECT_TESTING_", env_file=".env", extra="ignore"
- )
+ model_config = _build_settings_config(("testing",))
test_mode: bool = Field(
default=False,
diff --git a/src/prefect/settings/models/ui.py b/src/prefect/settings/models/ui.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/src/prefect/settings/models/worker.py b/src/prefect/settings/models/worker.py
index 569cf2ed24cb..6da0027545ea 100644
--- a/src/prefect/settings/models/worker.py
+++ b/src/prefect/settings/models/worker.py
@@ -1,13 +1,10 @@
from pydantic import Field
-from pydantic_settings import SettingsConfigDict
-from prefect.settings.base import PrefectBaseSettings
+from prefect.settings.base import PrefectBaseSettings, _build_settings_config
class WorkerWebserverSettings(PrefectBaseSettings):
- model_config = SettingsConfigDict(
- env_prefix="PREFECT_WORKER_WEBSERVER_", env_file=".env", extra="ignore"
- )
+ model_config = _build_settings_config(("worker", "webserver"))
host: str = Field(
default="0.0.0.0",
@@ -21,9 +18,7 @@ class WorkerWebserverSettings(PrefectBaseSettings):
class WorkerSettings(PrefectBaseSettings):
- model_config = SettingsConfigDict(
- env_prefix="PREFECT_WORKER_", env_file=".env", extra="ignore"
- )
+ model_config = _build_settings_config(("worker",))
heartbeat_seconds: float = Field(
default=30,
diff --git a/src/prefect/settings/sources.py b/src/prefect/settings/sources.py
index da7400a93165..2d41726c3657 100644
--- a/src/prefect/settings/sources.py
+++ b/src/prefect/settings/sources.py
@@ -4,16 +4,24 @@
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Type
+import dotenv
import toml
from pydantic import AliasChoices
from pydantic.fields import FieldInfo
from pydantic_settings import (
BaseSettings,
+ DotEnvSettingsSource,
EnvSettingsSource,
PydanticBaseSettingsSource,
)
+from pydantic_settings.sources import (
+ ENV_FILE_SENTINEL,
+ ConfigFileSourceMixin,
+ DotenvType,
+)
from prefect.settings.constants import DEFAULT_PREFECT_HOME, DEFAULT_PROFILES_PATH
+from prefect.utilities.collections import get_from_dict
class EnvFilterSettingsSource(EnvSettingsSource):
@@ -53,11 +61,49 @@ def __init__(
else:
self.env_vars = {
key: value
- for key, value in self.env_vars.items()
+ for key, value in self.env_vars.items() # type: ignore
if key.lower() not in env_filter
}
+class FilteredDotEnvSettingsSource(DotEnvSettingsSource):
+ def __init__(
+ self,
+ settings_cls: type[BaseSettings],
+ env_file: Optional[DotenvType] = ENV_FILE_SENTINEL,
+ env_file_encoding: Optional[str] = None,
+ case_sensitive: Optional[bool] = None,
+ env_prefix: Optional[str] = None,
+ env_nested_delimiter: Optional[str] = None,
+ env_ignore_empty: Optional[bool] = None,
+ env_parse_none_str: Optional[str] = None,
+ env_parse_enums: Optional[bool] = None,
+ env_blacklist: Optional[List[str]] = None,
+ ) -> None:
+ super().__init__(
+ settings_cls,
+ env_file,
+ env_file_encoding,
+ case_sensitive,
+ env_prefix,
+ env_nested_delimiter,
+ env_ignore_empty,
+ env_parse_none_str,
+ env_parse_enums,
+ )
+ self.env_blacklist = env_blacklist
+ if self.env_blacklist:
+ if isinstance(self.env_vars, dict):
+ for key in self.env_blacklist:
+ self.env_vars.pop(key, None)
+ else:
+ self.env_vars = {
+ key: value
+ for key, value in self.env_vars.items() # type: ignore
+ if key.lower() not in env_blacklist
+ }
+
+
class ProfileSettingsTomlLoader(PydanticBaseSettingsSource):
"""
Custom pydantic settings source to load profile settings from a toml file.
@@ -108,21 +154,34 @@ def get_field_value(
) -> Tuple[Any, str, bool]:
"""Concrete implementation to get the field value from the profile settings"""
if field.validation_alias:
+ # Use the validation alias as the key to ensure the profile value does
+ # not override higher priority sources. Otherwise, lower priority sources
+ # that use the field name could override higher priority sources that use
+ # the validation alias, as seen in https://github.com/PrefectHQ/prefect/issues/15981
if isinstance(field.validation_alias, str):
value = self.profile_settings.get(field.validation_alias.upper())
if value is not None:
- return value, field_name, self.field_is_complex(field)
+ return value, field.validation_alias, self.field_is_complex(field)
elif isinstance(field.validation_alias, AliasChoices):
+ value = None
+ lowest_priority_alias = next(
+ choice
+ for choice in reversed(field.validation_alias.choices)
+ if isinstance(choice, str)
+ )
for alias in field.validation_alias.choices:
if not isinstance(alias, str):
continue
value = self.profile_settings.get(alias.upper())
if value is not None:
- return value, field_name, self.field_is_complex(field)
+ return (
+ value,
+ lowest_priority_alias,
+ self.field_is_complex(field),
+ )
- value = self.profile_settings.get(
- f"{self.config.get('env_prefix','')}{field_name.upper()}"
- )
+ name = f"{self.config.get('env_prefix','')}{field_name.upper()}"
+ value = self.profile_settings.get(name)
return value, field_name, self.field_is_complex(field)
def __call__(self) -> Dict[str, Any]:
@@ -140,6 +199,93 @@ def __call__(self) -> Dict[str, Any]:
return profile_settings
+DEFAULT_PREFECT_TOML_PATH = Path("prefect.toml")
+
+
+class TomlConfigSettingsSourceBase(PydanticBaseSettingsSource, ConfigFileSourceMixin):
+ def __init__(self, settings_cls: Type[BaseSettings]):
+ super().__init__(settings_cls)
+ self.settings_cls = settings_cls
+ self.toml_data = {}
+
+ def _read_file(self, path: Path) -> Dict[str, Any]:
+ return toml.load(path)
+
+ def get_field_value(
+ self, field: FieldInfo, field_name: str
+ ) -> Tuple[Any, str, bool]:
+ """Concrete implementation to get the field value from toml data"""
+ value = self.toml_data.get(field_name)
+ if isinstance(value, dict):
+ # if the value is a dict, it is likely a nested settings object and a nested
+ # source will handle it
+ value = None
+ name = field_name
+ # Use the validation alias as the key to ensure the toml value does
+ # not override higher priority sources. Otherwise, lower priority sources
+ # that use the field name could override higher priority sources that use
+ # the validation alias, as seen in https://github.com/PrefectHQ/prefect/issues/15981
+ if value is not None:
+ if field.validation_alias and isinstance(field.validation_alias, str):
+ name = field.validation_alias
+ elif field.validation_alias and isinstance(
+ field.validation_alias, AliasChoices
+ ):
+ for alias in reversed(field.validation_alias.choices):
+ if isinstance(alias, str):
+ name = alias
+ break
+ return value, name, self.field_is_complex(field)
+
+ def __call__(self) -> Dict[str, Any]:
+ """Called by pydantic to get the settings from our custom source"""
+ toml_settings: Dict[str, Any] = {}
+ for field_name, field in self.settings_cls.model_fields.items():
+ value, key, is_complex = self.get_field_value(field, field_name)
+ if value is not None:
+ prepared_value = self.prepare_field_value(
+ field_name, field, value, is_complex
+ )
+ toml_settings[key] = prepared_value
+ return toml_settings
+
+
+class PrefectTomlConfigSettingsSource(TomlConfigSettingsSourceBase):
+ """Custom pydantic settings source to load settings from a prefect.toml file"""
+
+ def __init__(
+ self,
+ settings_cls: Type[BaseSettings],
+ ):
+ super().__init__(settings_cls)
+ self.toml_file_path = settings_cls.model_config.get(
+ "toml_file", DEFAULT_PREFECT_TOML_PATH
+ )
+ self.toml_data = self._read_files(self.toml_file_path)
+ self.toml_table_header = settings_cls.model_config.get(
+ "prefect_toml_table_header", tuple()
+ )
+ for key in self.toml_table_header:
+ self.toml_data = self.toml_data.get(key, {})
+
+
+class PyprojectTomlConfigSettingsSource(TomlConfigSettingsSourceBase):
+ """Custom pydantic settings source to load settings from a pyproject.toml file"""
+
+ def __init__(
+ self,
+ settings_cls: Type[BaseSettings],
+ ):
+ super().__init__(settings_cls)
+ self.toml_file_path = Path("pyproject.toml")
+ self.toml_data = self._read_files(self.toml_file_path)
+ self.toml_table_header = settings_cls.model_config.get(
+ "pyproject_toml_table_header", ("tool", "prefect")
+ )
+ for key in self.toml_table_header:
+ self.toml_data = self.toml_data.get(key, {})
+
+
def _is_test_mode() -> bool:
"""Check if the current process is in test mode."""
return bool(
@@ -157,6 +303,25 @@ def _get_profiles_path() -> Path:
return DEFAULT_PROFILES_PATH
if env_path := os.getenv("PREFECT_PROFILES_PATH"):
return Path(env_path)
+ if dotenv_path := dotenv.dotenv_values(".env").get("PREFECT_PROFILES_PATH"):
+ return Path(dotenv_path)
+ if toml_path := _get_profiles_path_from_toml("prefect.toml", ["profiles_path"]):
+ return Path(toml_path)
+ if pyproject_path := _get_profiles_path_from_toml(
+ "pyproject.toml", ["tool", "prefect", "profiles_path"]
+ ):
+ return Path(pyproject_path)
if not (DEFAULT_PREFECT_HOME / "profiles.toml").exists():
return DEFAULT_PROFILES_PATH
return DEFAULT_PREFECT_HOME / "profiles.toml"
+
+
+def _get_profiles_path_from_toml(path: str, keys: List[str]) -> Optional[str]:
+ """Helper to get the profiles path from a toml file."""
+
+ try:
+ toml_data = toml.load(path)
+ except FileNotFoundError:
+ return None
+
+ return get_from_dict(toml_data, keys)
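
Taken together, the new sources let a value come from `prefect.toml` or a `[tool.prefect]` table in `pyproject.toml`, while environment variables keep the highest priority. A rough precedence check (file contents and values hypothetical; the exact source ordering is wired up in `settings/base.py`, which is not shown here):

    # ./prefect.toml:
    #   [api]
    #   url = "http://from-toml:4200/api"
    import os
    from prefect.settings import Settings

    os.environ["PREFECT_API_URL"] = "http://from-env:4200/api"
    assert Settings().api.url == "http://from-env:4200/api"  # env wins over toml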
diff --git a/src/prefect/states.py b/src/prefect/states.py
index 2780d9b6d42c..f7acef9c2a48 100644
--- a/src/prefect/states.py
+++ b/src/prefect/states.py
@@ -2,6 +2,7 @@
import datetime
import sys
import traceback
+import uuid
import warnings
from collections import Counter
from types import GeneratorType, TracebackType
@@ -12,6 +13,7 @@
import pendulum
from typing_extensions import TypeGuard
+from prefect._internal.compatibility import deprecated
from prefect.client.schemas import State as State
from prefect.client.schemas import StateDetails, StateType
from prefect.exceptions import (
@@ -32,7 +34,6 @@
ResultRecordMetadata,
ResultStore,
)
-from prefect.settings import PREFECT_ASYNC_FETCH_STATE_RESULT
from prefect.utilities.annotations import BaseAnnotation
from prefect.utilities.asyncutils import in_async_main_thread, sync_compatible
from prefect.utilities.collections import ensure_iterable
@@ -40,10 +41,17 @@
logger = get_logger("states")
+@deprecated.deprecated_parameter(
+ "fetch",
+ when=lambda fetch: fetch is not True,
+ start_date="Oct 2024",
+ end_date="Jan 2025",
+ help="Please ensure you are awaiting the call to `result()` when calling in an async context.",
+)
def get_state_result(
state: State[R],
raise_on_failure: bool = True,
- fetch: Optional[bool] = None,
+ fetch: bool = True,
retry_result_failure: bool = True,
) -> R:
"""
@@ -52,23 +60,17 @@ def get_state_result(
See `State.result()`
"""
- if fetch is None and (
- PREFECT_ASYNC_FETCH_STATE_RESULT or not in_async_main_thread()
- ):
- # Fetch defaults to `True` for sync users or async users who have opted in
- fetch = True
- if not fetch:
- if fetch is None and in_async_main_thread():
- warnings.warn(
- (
- "State.result() was called from an async context but not awaited. "
- "This method will be updated to return a coroutine by default in "
- "the future. Pass `fetch=True` and `await` the call to get rid of "
- "this warning."
- ),
- DeprecationWarning,
- stacklevel=2,
- )
+ if not fetch and in_async_main_thread():
+ warnings.warn(
+ (
+ "State.result() was called from an async context but not awaited. "
+ "This method will be updated to return a coroutine by default in "
+ "the future. Pass `fetch=True` and `await` the call to get rid of "
+ "this warning."
+ ),
+ DeprecationWarning,
+ stacklevel=2,
+ )
return state.data
else:
@@ -219,7 +221,8 @@ async def exception_to_crashed_state(
)
if result_store:
- data = result_store.create_result_record(exc)
+ key = uuid.uuid4().hex
+ data = result_store.create_result_record(exc, key=key)
else:
# Attach the exception for local usage, will not be available when retrieved
# from the API
@@ -252,14 +255,15 @@ async def exception_to_failed_state(
pass
if result_store:
- data = result_store.create_result_record(exc)
+ key = uuid.uuid4().hex
+ data = result_store.create_result_record(exc, key=key)
if write_result:
try:
await result_store.apersist_result_record(data)
- except Exception as exc:
+ except Exception as nested_exc:
local_logger.warning(
"Failed to write result: %s Execution will continue, but the result has not been written",
- exc,
+ nested_exc,
)
else:
# Attach the exception for local usage, will not be available when retrieved
diff --git a/src/prefect/task_engine.py b/src/prefect/task_engine.py
index a4c948fb5bc7..43238d1cbc6c 100644
--- a/src/prefect/task_engine.py
+++ b/src/prefect/task_engine.py
@@ -67,6 +67,7 @@
PREFECT_DEBUG_MODE,
PREFECT_TASKS_REFRESH_CACHE,
)
+from prefect.settings.context import get_current_settings
from prefect.states import (
AwaitingRetry,
Completed,
@@ -150,11 +151,17 @@ def compute_transaction_key(self) -> Optional[str]:
else:
parameters = None
- key = self.task.cache_policy.compute_key(
- task_ctx=task_run_context,
- inputs=self.parameters,
- flow_parameters=parameters,
- )
+ try:
+ key = self.task.cache_policy.compute_key(
+ task_ctx=task_run_context,
+ inputs=self.parameters,
+ flow_parameters=parameters,
+ )
+ except Exception:
+ self.logger.exception(
+ "Error encountered when computing cache key - result will not be persisted.",
+ )
+ key = None
elif self.task.result_storage_key is not None:
key = _format_user_supplied_storage_key(self.task.result_storage_key)
return key
@@ -598,6 +605,8 @@ def setup_run_context(self, client: Optional[SyncPrefectClient] = None):
should_log_prints,
)
+ settings = get_current_settings()
+
if client is None:
client = self.client
if not self.task_run:
@@ -606,6 +615,12 @@ def setup_run_context(self, client: Optional[SyncPrefectClient] = None):
with ExitStack() as stack:
if log_prints := should_log_prints(self.task):
stack.enter_context(patch_print())
+ if self.task.persist_result is not None:
+ persist_result = self.task.persist_result
+ elif settings.tasks.default_persist_result is not None:
+ persist_result = settings.tasks.default_persist_result
+ else:
+ persist_result = should_persist_result()
stack.enter_context(
TaskRunContext(
task=self.task,
@@ -616,9 +631,7 @@ def setup_run_context(self, client: Optional[SyncPrefectClient] = None):
self.task, _sync=True
),
client=client,
- persist_result=self.task.persist_result
- if self.task.persist_result is not None
- else should_persist_result(),
+ persist_result=persist_result,
)
)
stack.enter_context(ConcurrencyContextV1())
@@ -1100,6 +1113,8 @@ async def setup_run_context(self, client: Optional[PrefectClient] = None):
should_log_prints,
)
+ settings = get_current_settings()
+
if client is None:
client = self.client
if not self.task_run:
@@ -1108,6 +1123,12 @@ async def setup_run_context(self, client: Optional[PrefectClient] = None):
with ExitStack() as stack:
if log_prints := should_log_prints(self.task):
stack.enter_context(patch_print())
+ if self.task.persist_result is not None:
+ persist_result = self.task.persist_result
+ elif settings.tasks.default_persist_result is not None:
+ persist_result = settings.tasks.default_persist_result
+ else:
+ persist_result = should_persist_result()
stack.enter_context(
TaskRunContext(
task=self.task,
@@ -1118,9 +1139,7 @@ async def setup_run_context(self, client: Optional[PrefectClient] = None):
self.task, _sync=False
),
client=client,
- persist_result=self.task.persist_result
- if self.task.persist_result is not None
- else should_persist_result(),
+ persist_result=persist_result,
)
)
stack.enter_context(ConcurrencyContext())
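
Both engines now resolve result persistence in the same order: an explicit `persist_result` on the task wins, then the new `PREFECT_TASKS_DEFAULT_PERSIST_RESULT` setting, then `should_persist_result()`. For example:

    from prefect import task

    @task(persist_result=True)  # explicit value beats the global default
    def pinned() -> int:
        return 42

    @task  # falls back to tasks.default_persist_result, if set
    def follows_setting() -> int:
        return 42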
diff --git a/src/prefect/telemetry/__init__.py b/src/prefect/telemetry/__init__.py
new file mode 100644
index 000000000000..8b137891791f
--- /dev/null
+++ b/src/prefect/telemetry/__init__.py
@@ -0,0 +1 @@
+
diff --git a/src/prefect/telemetry/bootstrap.py b/src/prefect/telemetry/bootstrap.py
new file mode 100644
index 000000000000..89aedad095f3
--- /dev/null
+++ b/src/prefect/telemetry/bootstrap.py
@@ -0,0 +1,32 @@
+from typing import TYPE_CHECKING, Union
+
+import prefect.settings
+from prefect.client.base import ServerType, determine_server_type
+
+if TYPE_CHECKING:
+ from opentelemetry.sdk._logs import LoggerProvider
+ from opentelemetry.sdk.metrics import MeterProvider
+ from opentelemetry.sdk.trace import TracerProvider
+
+
+def setup_telemetry() -> (
+ Union[
+ tuple["TracerProvider", "MeterProvider", "LoggerProvider"],
+ tuple[None, None, None],
+ ]
+):
+ settings = prefect.settings.get_current_settings()
+ if not settings.experiments.telemetry_enabled:
+ return None, None, None
+
+ server_type = determine_server_type()
+ if server_type != ServerType.CLOUD:
+ return None, None, None
+
+ assert settings.api.key
+ assert settings.api.url
+
+ # This import is here to defer importing of the `opentelemetry` packages.
+ from .instrumentation import setup_exporters
+
+ return setup_exporters(settings.api.url, settings.api.key.get_secret_value())
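
A caller (presumably an engine or worker startup path, which this diff does not show) would use the bootstrap like so; all three providers are `None` unless telemetry is enabled and the client points at Prefect Cloud:

    from prefect.telemetry.bootstrap import setup_telemetry

    trace_provider, meter_provider, logger_provider = setup_telemetry()
    if trace_provider is None:
        pass  # telemetry disabled or not connected to Prefect Cloud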
diff --git a/src/prefect/telemetry/instrumentation.py b/src/prefect/telemetry/instrumentation.py
new file mode 100644
index 000000000000..bb1ddbfcb425
--- /dev/null
+++ b/src/prefect/telemetry/instrumentation.py
@@ -0,0 +1,125 @@
+import logging
+import os
+import re
+from typing import TYPE_CHECKING
+from urllib.parse import urljoin
+from uuid import UUID
+
+from opentelemetry import metrics, trace
+from opentelemetry._logs import set_logger_provider
+from opentelemetry.exporter.otlp.proto.http._log_exporter import OTLPLogExporter
+from opentelemetry.exporter.otlp.proto.http.metric_exporter import OTLPMetricExporter
+from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
+from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler
+from opentelemetry.sdk._logs.export import SimpleLogRecordProcessor
+from opentelemetry.sdk.metrics import MeterProvider
+from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader
+from opentelemetry.sdk.resources import Resource
+from opentelemetry.sdk.trace import TracerProvider
+
+from .logging import set_log_handler
+from .processors import InFlightSpanProcessor
+
+if TYPE_CHECKING:
+ from opentelemetry.sdk._logs import LoggerProvider
+
+UUID_REGEX = "[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
+
+ACCOUNTS_PREFIX = "accounts/"
+ACCOUNT_ID_REGEX = f"{ACCOUNTS_PREFIX}{UUID_REGEX}"
+
+WORKSPACES_PREFIX = "workspaces/"
+WORKSPACE_ID_REGEX = f"{WORKSPACES_PREFIX}{UUID_REGEX}"
+
+
+def extract_account_and_workspace_id(url: str) -> tuple[UUID, UUID]:
+ account_id, workspace_id = None, None
+
+ if res := re.search(ACCOUNT_ID_REGEX, url):
+ account_id = UUID(res.group().removeprefix(ACCOUNTS_PREFIX))
+
+ if res := re.search(WORKSPACE_ID_REGEX, url):
+ workspace_id = UUID(res.group().removeprefix(WORKSPACES_PREFIX))
+
+ if account_id and workspace_id:
+ return account_id, workspace_id
+
+ raise ValueError(
+ f"Could not extract account and workspace id from API url: {url!r}"
+ )
+
+
+def _url_join(base_url: str, path: str) -> str:
+ return urljoin(base_url.rstrip("/") + "/", path.lstrip("/"))
+
+
+def setup_exporters(
+ api_url: str, api_key: str
+) -> tuple[TracerProvider, MeterProvider, "LoggerProvider"]:
+ account_id, workspace_id = extract_account_and_workspace_id(api_url)
+ telemetry_url = _url_join(api_url, "telemetry/")
+
+ headers = {
+ "Authorization": f"Bearer {api_key}",
+ }
+
+ resource = Resource.create(
+ {
+ "service.name": "prefect",
+ "service.instance.id": os.uname().nodename,
+ "prefect.account": str(account_id),
+ "prefect.workspace": str(workspace_id),
+ }
+ )
+
+ trace_provider = _setup_trace_provider(resource, headers, telemetry_url)
+ meter_provider = _setup_meter_provider(resource, headers, telemetry_url)
+ logger_provider = _setup_logger_provider(resource, headers, telemetry_url)
+
+ return trace_provider, meter_provider, logger_provider
+
+
+def _setup_trace_provider(
+ resource: Resource, headers: dict[str, str], telemetry_url: str
+) -> TracerProvider:
+ trace_provider = TracerProvider(resource=resource)
+ otlp_span_exporter = OTLPSpanExporter(
+ endpoint=_url_join(telemetry_url, "v1/traces"),
+ headers=headers,
+ )
+ trace_provider.add_span_processor(InFlightSpanProcessor(otlp_span_exporter))
+ trace.set_tracer_provider(trace_provider)
+
+ return trace_provider
+
+
+def _setup_meter_provider(
+ resource: Resource, headers: dict[str, str], telemetry_url: str
+) -> MeterProvider:
+ metric_reader = PeriodicExportingMetricReader(
+ OTLPMetricExporter(
+ endpoint=_url_join(telemetry_url, "v1/metrics"),
+ headers=headers,
+ )
+ )
+ meter_provider = MeterProvider(resource=resource, metric_readers=[metric_reader])
+ metrics.set_meter_provider(meter_provider)
+
+ return meter_provider
+
+
+def _setup_logger_provider(
+ resource: Resource, headers: dict[str, str], telemetry_url: str
+) -> LoggerProvider:
+ logger_provider = LoggerProvider(resource=resource)
+ otlp_exporter = OTLPLogExporter(
+ endpoint=_url_join(telemetry_url, "v1/logs"),
+ headers=headers,
+ )
+ logger_provider.add_log_record_processor(SimpleLogRecordProcessor(otlp_exporter))
+ set_logger_provider(logger_provider)
+ log_handler = LoggingHandler(level=logging.NOTSET, logger_provider=logger_provider)
+
+ set_log_handler(log_handler)
+
+ return logger_provider
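
Given a Cloud API URL of the usual shape, the regexes above recover both IDs (the UUIDs here are made up):

    from uuid import UUID
    from prefect.telemetry.instrumentation import extract_account_and_workspace_id

    url = (
        "https://api.prefect.cloud/api"
        "/accounts/11111111-1111-1111-1111-111111111111"
        "/workspaces/22222222-2222-2222-2222-222222222222"
    )
    account_id, workspace_id = extract_account_and_workspace_id(url)
    assert account_id == UUID("11111111-1111-1111-1111-111111111111")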
diff --git a/src/prefect/telemetry/logging.py b/src/prefect/telemetry/logging.py
new file mode 100644
index 000000000000..752d57bf3ae6
--- /dev/null
+++ b/src/prefect/telemetry/logging.py
@@ -0,0 +1,26 @@
+import logging
+from typing import TYPE_CHECKING, Optional
+
+if TYPE_CHECKING:
+ from opentelemetry.sdk._logs import LoggingHandler
+
+_log_handler: Optional["LoggingHandler"] = None
+
+
+def set_log_handler(log_handler: Optional["LoggingHandler"]) -> None:
+ """Set the OTLP log handler."""
+ global _log_handler
+ _log_handler = log_handler
+
+
+def get_log_handler() -> Optional["LoggingHandler"]:
+ """Get the OTLP log handler."""
+ global _log_handler
+ return _log_handler
+
+
+def add_telemetry_log_handler(logger: logging.Logger) -> None:
+ """Add the OTLP log handler to the given logger if the log handler has
+ been configured."""
+ if log_handler := get_log_handler():
+ logger.addHandler(log_handler)
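
Prefect's logging setup (elsewhere in the codebase) can attach the handler without caring whether telemetry was ever initialized, since the helper is a no-op until `set_log_handler()` has been called:

    import logging
    from prefect.telemetry.logging import add_telemetry_log_handler

    add_telemetry_log_handler(logging.getLogger("prefect.flow_runs"))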
diff --git a/src/prefect/telemetry/processors.py b/src/prefect/telemetry/processors.py
new file mode 100644
index 000000000000..f5f1dc663e9c
--- /dev/null
+++ b/src/prefect/telemetry/processors.py
@@ -0,0 +1,57 @@
+import time
+from threading import Event, Lock, Thread
+from typing import Dict, Optional
+
+from opentelemetry.context import Context
+from opentelemetry.sdk.trace import ReadableSpan, Span, SpanProcessor
+from opentelemetry.sdk.trace.export import SpanExporter
+
+
+class InFlightSpanProcessor(SpanProcessor):
+ def __init__(self, span_exporter: SpanExporter):
+ self.span_exporter = span_exporter
+ self._in_flight: Dict[int, Span] = {}
+ self._lock = Lock()
+ self._stop_event = Event()
+ self._export_thread = Thread(target=self._export_periodically, daemon=True)
+ self._export_thread.start()
+
+ def _export_periodically(self) -> None:
+ while not self._stop_event.is_set():
+ time.sleep(1)
+ with self._lock:
+ to_export = [
+ self._readable_span(span) for span in self._in_flight.values()
+ ]
+ if to_export:
+ self.span_exporter.export(to_export)
+
+ def _readable_span(self, span: Span) -> ReadableSpan:
+ readable = span._readable_span()
+ readable._end_time = time.time_ns()
+ readable._attributes = {
+ **(readable._attributes or {}),
+ "prefect.in-flight": True,
+ }
+ return readable
+
+ def on_start(self, span: Span, parent_context: Optional[Context] = None) -> None:
+ if not span.context or not span.context.trace_flags.sampled:
+ return
+ with self._lock:
+ self._in_flight[span.context.span_id] = span
+
+ def on_end(self, span: ReadableSpan) -> None:
+ if not span.context or not span.context.trace_flags.sampled:
+ return
+ with self._lock:
+ del self._in_flight[span.context.span_id]
+ self.span_exporter.export((span,))
+
+ def shutdown(self) -> None:
+ self._stop_event.set()
+ self._export_thread.join()
+ self.span_exporter.shutdown()
+
+ def force_flush(self, timeout_millis: int = 30000) -> bool:
+ return True
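
The processor works with any `SpanExporter`; a minimal local sanity check with the SDK's console exporter, showing that a still-open span is re-exported periodically with the `prefect.in-flight` attribute:

    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.export import ConsoleSpanExporter

    from prefect.telemetry.processors import InFlightSpanProcessor

    provider = TracerProvider()
    provider.add_span_processor(InFlightSpanProcessor(ConsoleSpanExporter()))
    with provider.get_tracer("demo").start_as_current_span("long-running"):
        ...  # snapshots of this span are exported roughly once per second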
diff --git a/src/prefect/types/__init__.py b/src/prefect/types/__init__.py
index ed20547cb095..9c4afda93936 100644
--- a/src/prefect/types/__init__.py
+++ b/src/prefect/types/__init__.py
@@ -7,7 +7,6 @@
from pydantic import (
BeforeValidator,
Field,
- SecretStr,
StrictBool,
StrictFloat,
StrictInt,
@@ -87,10 +86,26 @@ def check_variable_value(value: object) -> object:
LaxUrl = Annotated[str, BeforeValidator(lambda x: str(x).strip())]
-
StatusCode = Annotated[int, Field(ge=100, le=599)]
+def cast_none_to_empty_dict(value: Any) -> dict[str, Any]:
+ if value is None:
+ return {}
+ return value
+
+
+KeyValueLabels = Annotated[
+ dict[str, Union[StrictBool, StrictInt, StrictFloat, str]],
+ BeforeValidator(cast_none_to_empty_dict),
+]
+
+
+ListOfNonEmptyStrings = Annotated[
+ List[str], BeforeValidator(lambda x: [s for s in x if s.strip()])
+]
+
+
class SecretDict(pydantic.Secret[Dict[str, Any]]):
pass
@@ -110,7 +125,7 @@ def validate_set_T_from_delim_string(
T_adapter = TypeAdapter(type_)
delim = delim or ","
if isinstance(value, str):
- return {T_adapter.validate_strings(s) for s in value.split(delim)}
+ return {T_adapter.validate_strings(s.strip()) for s in value.split(delim)}
errors = []
try:
return {T_adapter.validate_python(value)}
@@ -138,6 +153,7 @@ def validate_set_T_from_delim_string(
"LogLevel",
"NonNegativeInteger",
"PositiveInteger",
+ "ListOfNonEmptyStrings",
"NonNegativeFloat",
"Name",
"NameOrEmpty",
diff --git a/src/prefect/utilities/filesystem.py b/src/prefect/utilities/filesystem.py
index dad659f259a0..68a87a410039 100644
--- a/src/prefect/utilities/filesystem.py
+++ b/src/prefect/utilities/filesystem.py
@@ -7,7 +7,7 @@
import threading
from contextlib import contextmanager
from pathlib import Path, PureWindowsPath
-from typing import Optional, Union
+from typing import Optional, Union, cast
import fsspec
import pathspec
@@ -22,8 +22,8 @@ def create_default_ignore_file(path: str) -> bool:
Creates default ignore file in the provided path if one does not already exist; returns boolean specifying
whether a file was created.
"""
- path = pathlib.Path(path)
- ignore_file = path / ".prefectignore"
+ _path = pathlib.Path(path)
+ ignore_file = _path / ".prefectignore"
if ignore_file.exists():
return False
default_file = pathlib.Path(prefect.__module_path__) / ".prefectignore"
@@ -54,12 +54,34 @@ def filter_files(
chdir_lock = threading.Lock()
+def _normalize_path(path: Union[str, Path]) -> str:
+ """
+ Normalize a path, handling UNC paths on Windows specially.
+ """
+ path = Path(path)
+
+ # Handle UNC paths on Windows differently
+ if os.name == "nt" and str(path).startswith("\\\\"):
+ # For UNC paths, use absolute() instead of resolve()
+ # to avoid the Windows path resolution issues
+ return str(path.absolute())
+ else:
+ try:
+ # For non-UNC paths, try resolve() first
+ return str(path.resolve())
+ except OSError:
+ # Fallback to absolute() if resolve() fails
+ return str(path.absolute())
+
+
@contextmanager
def tmpchdir(path: str):
"""
- Change current-working directories for the duration of the context
+ Change the current working directory for the duration of the context,
+ with special handling for UNC paths on Windows.
"""
- path = os.path.abspath(path)
+ path = _normalize_path(path)
+
if os.path.isfile(path) or (not os.path.exists(path) and not path.endswith("/")):
path = os.path.dirname(path)
@@ -67,7 +89,12 @@ def tmpchdir(path: str):
with chdir_lock:
try:
- os.chdir(path)
+ # On Windows with UNC paths, we need to handle the directory change carefully
+ if os.name == "nt" and path.startswith("\\\\"):
+ # Use os.path.abspath to handle UNC paths
+ os.chdir(os.path.abspath(path))
+ else:
+ os.chdir(path)
yield path
finally:
os.chdir(owd)
@@ -76,7 +103,7 @@ def tmpchdir(path: str):
def filename(path: str) -> str:
"""Extract the file name from a path with remote file system support"""
try:
- of: OpenFile = fsspec.open(path)
+ of: OpenFile = cast(OpenFile, fsspec.open(path))
sep = of.fs.sep
except (ImportError, AttributeError):
sep = "\\" if "\\" in path else "/"
@@ -98,7 +125,7 @@ def is_local_path(path: Union[str, pathlib.Path, OpenFile]):
else:
raise TypeError(f"Invalid path of type {type(path).__name__!r}")
- return type(of.fs) == LocalFileSystem
+ return isinstance(of.fs, LocalFileSystem)
def to_display_path(
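
A short usage sketch of the reworked `tmpchdir` (the path is illustrative; on Windows, `\\server\share`-style UNC paths take the `absolute()` branch instead of `resolve()`):

    import os

    from prefect.utilities.filesystem import tmpchdir

    original = os.getcwd()
    with tmpchdir("/tmp"):
        # The target is normalized via _normalize_path and the directory
        # change is serialized behind chdir_lock.
        print(os.getcwd())
    assert os.getcwd() == original  # the original directory is restored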
diff --git a/src/prefect/utilities/hashing.py b/src/prefect/utilities/hashing.py
index 204acf76dea7..2724cb38c3f4 100644
--- a/src/prefect/utilities/hashing.py
+++ b/src/prefect/utilities/hashing.py
@@ -6,6 +6,7 @@
import cloudpickle
+from prefect.exceptions import HashError
from prefect.serializers import JSONSerializer
if sys.version_info[:2] >= (3, 9):
@@ -48,20 +49,44 @@ def file_hash(path: str, hash_algo=_md5) -> str:
return stable_hash(contents, hash_algo=hash_algo)
-def hash_objects(*args, hash_algo=_md5, **kwargs) -> Optional[str]:
+def hash_objects(
+ *args, hash_algo=_md5, raise_on_failure: bool = False, **kwargs
+) -> Optional[str]:
"""
Attempt to hash objects by dumping to JSON or serializing with cloudpickle.
- On failure of both, `None` will be returned
+
+ Args:
+ *args: Positional arguments to hash
+ hash_algo: Hash algorithm to use
+ raise_on_failure: If True, raise exceptions instead of returning None
+ **kwargs: Keyword arguments to hash
+
+ Returns:
+ A hash string or None if hashing failed
+
+ Raises:
+ HashError: If objects cannot be hashed and raise_on_failure is True
"""
+ json_error = None
+ pickle_error = None
+
try:
serializer = JSONSerializer(dumps_kwargs={"sort_keys": True})
return stable_hash(serializer.dumps((args, kwargs)), hash_algo=hash_algo)
- except Exception:
- pass
+ except Exception as e:
+ json_error = str(e)
try:
return stable_hash(cloudpickle.dumps((args, kwargs)), hash_algo=hash_algo)
- except Exception:
- pass
+ except Exception as e:
+ pickle_error = str(e)
+
+ if raise_on_failure:
+ msg = (
+ "Unable to create hash - objects could not be serialized.\n"
+ f" JSON error: {json_error}\n"
+ f" Pickle error: {pickle_error}"
+ )
+ raise HashError(msg)
return None
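
A usage sketch of the new `raise_on_failure` flag; a `threading.Lock` is used because it defeats both JSON and cloudpickle serialization:

    import threading

    from prefect.exceptions import HashError
    from prefect.utilities.hashing import hash_objects

    # Hashing is deterministic for JSON-serializable inputs.
    assert hash_objects({"x": 1}, retries=3) == hash_objects({"x": 1}, retries=3)

    # The default behavior is unchanged: failures return None.
    assert hash_objects(threading.Lock()) is None

    # Opting in surfaces both underlying serialization errors via HashError.
    try:
        hash_objects(threading.Lock(), raise_on_failure=True)
    except HashError as exc:
        print(exc)  # includes the JSON error and the pickle error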
diff --git a/src/prefect/utilities/urls.py b/src/prefect/utilities/urls.py
index 91d67236fb05..7b99f645b648 100644
--- a/src/prefect/utilities/urls.py
+++ b/src/prefect/utilities/urls.py
@@ -2,6 +2,7 @@
import ipaddress
import socket
import urllib.parse
+from string import Formatter
from typing import TYPE_CHECKING, Any, Literal, Optional, Union
from urllib.parse import urlparse
from uuid import UUID
@@ -22,7 +23,6 @@
# The following objects are excluded from UI URL generation because we lack a
# directly-addressable URL:
-# worker
# artifact
# variable
# saved-search
@@ -38,6 +38,7 @@
"deployment": "deployments/deployment/{obj_id}",
"automation": "automations/automation/{obj_id}",
"received-event": "events/event/{occurred}/{obj_id}",
+ "worker": "work-pools/work-pool/{work_pool_name}/worker/{obj_id}",
}
# The following objects are excluded from API URL generation because we lack a
@@ -134,6 +135,7 @@ def url_for(
obj_id: Optional[Union[str, UUID]] = None,
url_type: URLType = "ui",
default_base_url: Optional[str] = None,
+ **additional_format_kwargs: Any,
) -> Optional[str]:
"""
Returns the URL for a Prefect object.
@@ -149,6 +151,8 @@ def url_for(
Whether to return the URL for the UI (default) or API.
default_base_url (str, optional):
The default base URL to use if no URL is configured.
+ additional_format_kwargs (Dict[str, Any], optional):
+            Additional keyword arguments to interpolate into the URL format string.
Returns:
Optional[str]: The URL for the given object or None if the object is not supported.
@@ -246,7 +250,18 @@ def url_for(
occurred=obj.occurred.strftime("%Y-%m-%d"), obj_id=obj_id
)
else:
- url = url_format.format(obj_id=obj_id)
+ obj_keys = [
+ fname
+ for _, fname, _, _ in Formatter().parse(url_format)
+ if fname is not None and fname != "obj_id"
+ ]
+
+            missing_keys = [
+                key for key in obj_keys if key not in additional_format_kwargs
+            ]
+            if missing_keys:
+                raise ValueError(
+                    f"Unable to generate URL for {name} because the following keys are missing: {', '.join(missing_keys)}"
+                )
+
+ url = url_format.format(obj_id=obj_id, **additional_format_kwargs)
if not base_url.endswith("/"):
base_url += "/"
diff --git a/src/prefect/workers/base.py b/src/prefect/workers/base.py
index e4c05ed551ca..843f8c9a4e62 100644
--- a/src/prefect/workers/base.py
+++ b/src/prefect/workers/base.py
@@ -4,20 +4,29 @@
from contextlib import AsyncExitStack
from functools import partial
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Set, Type, Union
-from uuid import uuid4
+from uuid import UUID, uuid4
import anyio
import anyio.abc
+import httpx
import pendulum
+from importlib_metadata import distributions
from pydantic import BaseModel, Field, PrivateAttr, field_validator
from pydantic.json_schema import GenerateJsonSchema
from typing_extensions import Literal
import prefect
from prefect._internal.schemas.validators import return_v_or_none
+from prefect.client.base import ServerType
+from prefect.client.cloud import CloudClient, get_cloud_client
from prefect.client.orchestration import PrefectClient, get_client
from prefect.client.schemas.actions import WorkPoolCreate, WorkPoolUpdate
-from prefect.client.schemas.objects import StateType, WorkPool
+from prefect.client.schemas.objects import (
+ Integration,
+ StateType,
+ WorkerMetadata,
+ WorkPool,
+)
from prefect.client.utilities import inject_client
from prefect.events import Event, RelatedResource, emit_event
from prefect.events.related import object_as_related_resource, tags_as_related_resources
@@ -25,7 +34,11 @@
Abort,
ObjectNotFound,
)
-from prefect.logging.loggers import PrefectLogAdapter, flow_run_logger, get_logger
+from prefect.logging.loggers import (
+ PrefectLogAdapter,
+ flow_run_logger,
+ get_worker_logger,
+)
from prefect.plugins import load_prefect_collections
from prefect.settings import (
PREFECT_API_URL,
@@ -49,6 +62,7 @@
resolve_block_document_references,
resolve_variables,
)
+from prefect.utilities.urls import url_for
if TYPE_CHECKING:
from prefect.client.schemas.objects import Flow, FlowRun
@@ -132,26 +146,26 @@ async def from_template_and_values(
Important: this method expects that the base_job_template was already
validated server-side.
"""
- job_config: Dict[str, Any] = base_job_template["job_configuration"]
+ base_config: Dict[str, Any] = base_job_template["job_configuration"]
variables_schema = base_job_template["variables"]
variables = cls._get_base_config_defaults(
variables_schema.get("properties", {})
)
- # copy variable defaults for `env` to job config before they're replaced by
+ # copy variable defaults for `env` to base config before they're replaced by
# deployment overrides
if variables.get("env"):
- job_config["env"] = variables.get("env")
+ base_config["env"] = variables.get("env")
variables.update(values)
# deep merge `env`
- if isinstance(job_config.get("env"), dict) and (
- hardcoded_env := variables.get("env")
+ if isinstance(base_config.get("env"), dict) and (
+ deployment_env := variables.get("env")
):
- job_config["env"] = hardcoded_env | job_config.get("env")
+ base_config["env"] = base_config.get("env") | deployment_env
- populated_configuration = apply_values(template=job_config, values=variables)
+ populated_configuration = apply_values(template=base_config, values=variables)
populated_configuration = await resolve_block_document_references(
template=populated_configuration, client=client
)
@@ -406,7 +420,8 @@ def __init__(
raise ValueError("Worker name cannot contain '/' or '%'")
self.name = name or f"{self.__class__.__name__} {uuid4()}"
self._started_event: Optional[Event] = None
- self._logger = get_logger(f"worker.{self.__class__.type}.{self.name.lower()}")
+ self.backend_id: Optional[UUID] = None
+ self._logger = get_worker_logger(self)
self.is_setup = False
self._create_pool_if_not_found = create_pool_if_not_found
@@ -425,12 +440,14 @@ def __init__(
self._exit_stack: AsyncExitStack = AsyncExitStack()
self._runs_task_group: Optional[anyio.abc.TaskGroup] = None
self._client: Optional[PrefectClient] = None
+ self._cloud_client: Optional[CloudClient] = None
self._last_polled_time: pendulum.DateTime = pendulum.now("utc")
self._limit = limit
self._limiter: Optional[anyio.CapacityLimiter] = None
self._submitting_flow_run_ids = set()
self._cancelling_flow_run_ids = set()
self._scheduled_task_scopes = set()
+ self._worker_metadata_sent = False
@classmethod
def get_documentation_url(cls) -> str:
@@ -488,15 +505,19 @@ def get_name_slug(self):
return slugify(self.name)
def get_flow_run_logger(self, flow_run: "FlowRun") -> PrefectLogAdapter:
+ extra = {
+ "worker_name": self.name,
+ "work_pool_name": (
+ self._work_pool_name if self._work_pool else ""
+ ),
+ "work_pool_id": str(getattr(self._work_pool, "id", "unknown")),
+ }
+ if self.backend_id:
+ extra["worker_id"] = str(self.backend_id)
+
return flow_run_logger(flow_run=flow_run).getChild(
"worker",
- extra={
- "worker_name": self.name,
- "work_pool_name": (
- self._work_pool_name if self._work_pool else ""
- ),
- "work_pool_id": str(getattr(self._work_pool, "id", "unknown")),
- },
+ extra=extra,
)
async def start(
@@ -611,9 +632,14 @@ async def setup(self):
raise ValueError("`PREFECT_API_URL` must be set to start a Worker.")
self._client = get_client()
+
await self._exit_stack.enter_async_context(self._client)
await self._exit_stack.enter_async_context(self._runs_task_group)
+ if self._client.server_type == ServerType.CLOUD:
+ self._cloud_client = get_cloud_client()
+ await self._exit_stack.enter_async_context(self._cloud_client)
+
self.is_setup = True
async def teardown(self, *exc_info):
@@ -623,9 +649,14 @@ async def teardown(self, *exc_info):
for scope in self._scheduled_task_scopes:
scope.cancel()
- await self._exit_stack.__aexit__(*exc_info)
+ # Emit stopped event before closing client
if self._started_event:
- await self._emit_worker_stopped_event(self._started_event)
+ try:
+ await self._emit_worker_stopped_event(self._started_event)
+ except Exception:
+ self._logger.exception("Failed to emit worker stopped event")
+
+ await self._exit_stack.__aexit__(*exc_info)
self._runs_task_group = None
self._client = None
@@ -710,14 +741,73 @@ async def _update_local_work_pool_info(self):
self._work_pool = work_pool
- async def _send_worker_heartbeat(self):
- if self._work_pool:
- await self._client.send_worker_heartbeat(
- work_pool_name=self._work_pool_name,
- worker_name=self.name,
- heartbeat_interval_seconds=self.heartbeat_interval_seconds,
+ async def _worker_metadata(self) -> Optional[WorkerMetadata]:
+ """
+ Returns metadata about installed Prefect collections for the worker.
+ """
+ installed_integrations = load_prefect_collections().keys()
+
+ integration_versions = [
+ Integration(name=dist.metadata["Name"], version=dist.version)
+ for dist in distributions()
+ # PyPI packages often use dashes, but Python package names use underscores
+ # because they must be valid identifiers.
+ if (name := dist.metadata.get("Name"))
+ and (name.replace("-", "_") in installed_integrations)
+ ]
+
+ if integration_versions:
+ return WorkerMetadata(integrations=integration_versions)
+ return None
+
+ async def _send_worker_heartbeat(self) -> Optional[UUID]:
+ """
+ Sends a heartbeat to the API.
+ """
+ if not self._client:
+ self._logger.warning("Client has not been initialized; skipping heartbeat.")
+ return None
+ if not self._work_pool:
+ self._logger.debug("Worker has no work pool; skipping heartbeat.")
+ return None
+
+ should_get_worker_id = self._should_get_worker_id()
+
+ params = {
+ "work_pool_name": self._work_pool_name,
+ "worker_name": self.name,
+ "heartbeat_interval_seconds": self.heartbeat_interval_seconds,
+ "get_worker_id": should_get_worker_id,
+ }
+ if (
+ self._client.server_type == ServerType.CLOUD
+ and not self._worker_metadata_sent
+ ):
+ worker_metadata = await self._worker_metadata()
+ if worker_metadata:
+ params["worker_metadata"] = worker_metadata
+ self._worker_metadata_sent = True
+
+ worker_id = None
+ try:
+ worker_id = await self._client.send_worker_heartbeat(**params)
+ except httpx.HTTPStatusError as e:
+ if e.response.status_code == 422 and should_get_worker_id:
+ self._logger.warning(
+ "Failed to retrieve worker ID from the Prefect API server."
+ )
+ params["get_worker_id"] = False
+ worker_id = await self._client.send_worker_heartbeat(**params)
+ else:
+ raise e
+
+ if should_get_worker_id and worker_id is None:
+ self._logger.warning(
+ "Failed to retrieve worker ID from the Prefect API server."
)
+ return worker_id
+
async def sync_with_backend(self):
"""
         Updates the worker's local information about its current work pool and
@@ -725,9 +815,23 @@ async def sync_with_backend(self):
"""
await self._update_local_work_pool_info()
- await self._send_worker_heartbeat()
+ remote_id = await self._send_worker_heartbeat()
+ if remote_id:
+ self.backend_id = remote_id
+ self._logger = get_worker_logger(self)
- self._logger.debug("Worker synchronized with the Prefect API server.")
+ self._logger.debug(
+ "Worker synchronized with the Prefect API server. "
+ + (f"Remote ID: {self.backend_id}" if self.backend_id else "")
+ )
+
+ def _should_get_worker_id(self):
+ """Determines if the worker should request an ID from the API server."""
+ return (
+ self._client
+ and self._client.server_type == ServerType.CLOUD
+ and self.backend_id is None
+ )
async def _get_scheduled_flow_runs(
self,
@@ -782,6 +886,20 @@ async def _submit_scheduled_flow_runs(
run_logger.info(
f"Worker '{self.name}' submitting flow run '{flow_run.id}'"
)
+ if self.backend_id:
+ try:
+ worker_url = url_for(
+ "worker",
+ obj_id=self.backend_id,
+ work_pool_name=self._work_pool_name,
+ )
+
+ run_logger.info(
+ f"Running on worker id: {self.backend_id}. See worker logs here: {worker_url}"
+ )
+ except ValueError as ve:
+ run_logger.warning(f"Failed to generate worker URL: {ve}")
+
self._submitting_flow_run_ids.add(flow_run.id)
self._runs_task_group.start_soon(
self._submit_run,
@@ -855,10 +973,11 @@ async def _submit_run(self, flow_run: "FlowRun") -> None:
else:
# If the run is not ready to submit, release the concurrency slot
- if self._limiter:
- self._limiter.release_on_behalf_of(flow_run.id)
+ self._release_limit_slot(flow_run.id)
self._submitting_flow_run_ids.remove(flow_run.id)
+ else:
+ self._release_limit_slot(flow_run.id)
async def _submit_run_and_capture_errors(
self, flow_run: "FlowRun", task_status: Optional[anyio.abc.TaskStatus] = None
@@ -868,6 +987,7 @@ async def _submit_run_and_capture_errors(
try:
configuration = await self._get_configuration(flow_run)
submitted_event = self._emit_flow_run_submitted_event(configuration)
+ await self._give_worker_labels_to_flow_run(flow_run.id)
result = await self.run(
flow_run=flow_run,
task_status=task_status,
@@ -892,8 +1012,7 @@ async def _submit_run_and_capture_errors(
)
return exc
finally:
- if self._limiter:
- self._limiter.release_on_behalf_of(flow_run.id)
+ self._release_limit_slot(flow_run.id)
if not task_status._future.done():
run_logger.error(
@@ -918,6 +1037,14 @@ async def _submit_run_and_capture_errors(
return result
+ def _release_limit_slot(self, flow_run_id: str) -> None:
+ """
+ Frees up a slot taken by the given flow run id.
+ """
+ if self._limiter:
+ self._limiter.release_on_behalf_of(flow_run_id)
+ self._logger.debug("Limit slot released for flow run '%s'", flow_run_id)
+
def get_status(self):
"""
Retrieves the status of the current worker including its name, current worker
@@ -1089,6 +1216,19 @@ async def wrapper(task_status):
await self._runs_task_group.start(wrapper)
+ async def _give_worker_labels_to_flow_run(self, flow_run_id: UUID):
+ """
+ Give this worker's identifying labels to the specified flow run.
+ """
+ if self._cloud_client:
+ await self._cloud_client.update_flow_run_labels(
+ flow_run_id,
+ {
+ "prefect.worker.name": self.name,
+ "prefect.worker.type": self.type,
+ },
+ )
+
async def __aenter__(self):
self._logger.debug("Entering worker context...")
await self.setup()
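
A standalone sketch of the distribution-to-collection matching performed by `_worker_metadata` (the `installed_integrations` set is illustrative; the real value comes from `load_prefect_collections()`):

    from importlib_metadata import distributions

    installed_integrations = {"prefect_aws", "prefect_gcp"}

    integration_versions = [
        (name, dist.version)
        for dist in distributions()
        # PyPI names use dashes; importable module names use underscores.
        if (name := dist.metadata.get("Name"))
        and name.replace("-", "_") in installed_integrations
    ]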
diff --git a/tests/_internal/compatibility/test_async_dispatch.py b/tests/_internal/compatibility/test_async_dispatch.py
new file mode 100644
index 000000000000..ea7b10c4387b
--- /dev/null
+++ b/tests/_internal/compatibility/test_async_dispatch.py
@@ -0,0 +1,184 @@
+import asyncio
+from typing import List, Optional
+
+import pytest
+
+from prefect._internal.compatibility.async_dispatch import (
+ async_dispatch,
+ is_in_async_context,
+)
+from prefect.utilities.asyncutils import run_sync_in_worker_thread
+
+
+class TestAsyncDispatchBasicUsage:
+ def test_async_compatible_fn_in_sync_context(self):
+ data: List[str] = []
+
+ async def my_function_async() -> None:
+ data.append("async")
+
+ @async_dispatch(my_function_async)
+ def my_function() -> None:
+ data.append("sync")
+
+ my_function()
+ assert data == ["sync"]
+
+ async def test_async_compatible_fn_in_async_context(self):
+ data: List[str] = []
+
+ async def my_function_async() -> None:
+ data.append("async")
+
+ @async_dispatch(my_function_async)
+ def my_function() -> None:
+ data.append("sync")
+
+ await my_function()
+ assert data == ["async"]
+
+ async def test_can_force_sync_or_async_dispatch(self):
+ """Verify that we can force sync or async dispatch regardless of context"""
+ data: List[str] = []
+
+ async def my_function_async() -> None:
+ data.append("async")
+
+ @async_dispatch(my_function_async)
+ def my_function() -> None:
+ data.append("sync")
+
+ # Force sync even in async context
+ my_function(_sync=True)
+ assert data == ["sync"]
+
+ data.clear()
+
+ # Force async
+ await my_function(_sync=False)
+ assert data == ["async"]
+
+
+class TestAsyncDispatchValidation:
+ def test_async_compatible_requires_async_implementation(self):
+ """Verify we properly reject non-async implementations"""
+
+ def not_async() -> None:
+ pass
+
+ with pytest.raises(TypeError, match="async_impl must be an async function"):
+
+ @async_dispatch(not_async)
+ def my_function() -> None:
+ pass
+
+ def test_async_compatible_requires_implementation(self):
+ """Verify we properly reject missing implementations"""
+
+ with pytest.raises(
+ TypeError,
+ match=r"async_dispatch\(\) missing 1 required positional argument: 'async_impl'",
+ ):
+
+ @async_dispatch()
+ def my_function() -> None:
+ pass
+
+
+class TestMethodBinding:
+ async def test_method_binding_works_correctly(self):
+ """Verify that self is properly bound for instance methods"""
+
+ class Counter:
+ def __init__(self) -> None:
+ self.count = 0
+
+ async def increment_async(self) -> None:
+ self.count += 1
+
+ @async_dispatch(increment_async)
+ def increment(self) -> None:
+ self.count += 1
+
+ counter = Counter()
+ assert counter.count == 0
+
+ # Test sync
+ counter.increment(_sync=True)
+ assert counter.count == 1
+
+ # Test async
+ await counter.increment(_sync=False)
+ assert counter.count == 2
+
+ async def test_method_binding_respects_context(self):
+ """Verify that methods automatically dispatch based on context"""
+
+ class Counter:
+ def __init__(self) -> None:
+ self.count = 0
+ self.calls: List[str] = []
+
+ async def increment_async(self) -> None:
+ self.calls.append("async")
+ self.count += 1
+
+ @async_dispatch(increment_async)
+ def increment(self) -> None:
+ self.calls.append("sync")
+ self.count += 1
+
+ counter = Counter()
+
+ # In sync context
+ def sync_caller() -> None:
+ counter.increment(_sync=True)
+
+ sync_caller()
+ assert counter.calls == ["sync"]
+ assert counter.count == 1
+
+ # In async context
+ await counter.increment()
+ assert counter.calls == ["sync", "async"]
+ assert counter.count == 2
+
+
+class TestIsInAsyncContext:
+ async def test_is_in_async_context_from_coroutine(self):
+ """Verify detection inside a coroutine"""
+ assert is_in_async_context() is True
+
+ def test_is_in_async_context_from_sync(self):
+ """Verify detection in pure sync context"""
+ assert is_in_async_context() is False
+
+ async def test_is_in_async_context_with_nested_sync_in_worker_thread(self):
+ def sync_func() -> bool:
+ return is_in_async_context()
+
+ assert await run_sync_in_worker_thread(sync_func) is False
+
+ def test_is_in_async_context_with_running_loop(self):
+ """Verify detection with just a running event loop"""
+ loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(loop)
+ result: Optional[bool] = None
+
+ def check_context() -> None:
+ nonlocal result
+ result = is_in_async_context()
+ loop.stop()
+
+ try:
+ loop.call_soon(check_context)
+ loop.run_forever()
+ assert (
+ result is True
+ ), "the result we captured while loop was running should be True"
+ finally:
+ loop.close()
+ asyncio.set_event_loop(None)
+ assert (
+ is_in_async_context() is False
+ ), "the loop should be closed and not considered an async context"
diff --git a/tests/_internal/test_integrations.py b/tests/_internal/test_integrations.py
index e8133f60b88f..e44f28e4eeaf 100644
--- a/tests/_internal/test_integrations.py
+++ b/tests/_internal/test_integrations.py
@@ -13,6 +13,7 @@ def extract_extras_require(setup_py_content):
client_requires = []
install_requires = []
dev_requires = []
+ otel_requires = []
markdown_requirements = []
markdown_tests_requires = []
@@ -23,6 +24,7 @@ def extract_extras_require(setup_py_content):
"client_requires": client_requires,
"install_requires": install_requires,
"dev_requires": dev_requires,
+ "otel_requires": otel_requires,
"markdown_requirements": markdown_requirements,
"markdown_tests_requires": markdown_tests_requires,
},
diff --git a/tests/blocks/test_notifications.py b/tests/blocks/test_notifications.py
index fa0ffad4440b..a3da617adce4 100644
--- a/tests/blocks/test_notifications.py
+++ b/tests/blocks/test_notifications.py
@@ -292,7 +292,7 @@ async def test_notify_async(self):
AppriseMock.assert_called_once()
apprise_instance_mock.add.assert_called_once_with(
- f"opsgenie://{self.API_KEY}//?action=map®ion=us&priority=normal&"
+ f"opsgenie://{self.API_KEY}//?action=new®ion=us&priority=normal&"
"batch=no&%3Ainfo=note&%3Asuccess=close&%3Awarning=new&%3Afailure="
"new&format=text&overflow=upstream"
)
@@ -304,7 +304,7 @@ async def test_notify_async(self):
def _test_notify_sync(self, targets="", params=None, **kwargs):
with patch("apprise.Apprise", autospec=True) as AppriseMock:
if params is None:
- params = "action=map®ion=us&priority=normal&batch=no"
+ params = "action=new®ion=us&priority=normal&batch=no"
apprise_instance_mock = AppriseMock.return_value
apprise_instance_mock.async_notify = AsyncMock()
@@ -331,7 +331,7 @@ def test_notify_sync_simple(self):
self._test_notify_sync()
def test_notify_sync_params(self):
- params = "action=map®ion=eu&priority=low&batch=yes"
+ params = "action=new®ion=eu&priority=low&batch=yes"
self._test_notify_sync(params=params, region_name="eu", priority=1, batch=True)
def test_notify_sync_targets(self):
@@ -349,7 +349,7 @@ def test_notify_sync_users(self):
self._test_notify_sync(targets=targets, target_user=["user1", "user2"])
def test_notify_sync_details(self):
- params = "action=map®ion=us&priority=normal&batch=no&%2Bkey1=value1&%2Bkey2=value2"
+ params = "action=new®ion=us&priority=normal&batch=no&%2Bkey1=value1&%2Bkey2=value2"
self._test_notify_sync(
params=params,
details={
diff --git a/tests/cli/cloud/test_cloud.py b/tests/cli/cloud/test_cloud.py
index e6157647cc29..bff581618213 100644
--- a/tests/cli/cloud/test_cloud.py
+++ b/tests/cli/cloud/test_cloud.py
@@ -872,7 +872,7 @@ def test_login_already_logged_in_to_another_profile_cancel_during_select(respx_m
invoke_and_assert(
["cloud", "login"],
- expected_code=1,
+ expected_code=130, # assumes typer>=0.13.0
user_input=(
# Yes, switch profiles
"y"
@@ -884,7 +884,6 @@ def test_login_already_logged_in_to_another_profile_cancel_during_select(respx_m
"? Would you like to switch profiles? [Y/n]:",
"? Which authenticated profile would you like to switch to?",
"logged-in-profile",
- "Aborted",
],
)
diff --git a/tests/cli/test_config.py b/tests/cli/test_config.py
index 88072aea20de..76ce8b8e9e17 100644
--- a/tests/cli/test_config.py
+++ b/tests/cli/test_config.py
@@ -1,6 +1,7 @@
import sys
import pytest
+import toml
from typer import Exit
import prefect.context
@@ -9,6 +10,7 @@
from prefect.settings import (
PREFECT_API_DATABASE_TIMEOUT,
PREFECT_API_KEY,
+ PREFECT_CLIENT_RETRY_EXTRA_CODES,
PREFECT_LOGGING_TO_API_MAX_LOG_SIZE,
PREFECT_PROFILES_PATH,
PREFECT_SERVER_ALLOW_EPHEMERAL_MODE,
@@ -21,11 +23,15 @@
)
from prefect.settings.legacy import _get_valid_setting_names
from prefect.testing.cli import invoke_and_assert
+from prefect.utilities.filesystem import tmpchdir
# Source strings displayed by `prefect config view`
FROM_DEFAULT = "(from defaults)"
FROM_ENV = "(from env)"
FROM_PROFILE = "(from profile)"
+FROM_DOT_ENV = "(from .env file)"
+FROM_PREFECT_TOML = "(from prefect.toml)"
+FROM_PYPROJECT_TOML = "(from pyproject.toml)"
@pytest.fixture(autouse=True)
@@ -544,3 +550,135 @@ def test_view_shows_secrets(monkeypatch, command):
if "--show-defaults" in command:
assert f"PREFECT_API_DATABASE_PASSWORD='None' {FROM_DEFAULT}" in lines
+
+
+def test_view_with_env_file(tmp_path):
+ with tmpchdir(tmp_path):
+ with open(".env", "w") as f:
+ f.write("PREFECT_CLIENT_RETRY_EXTRA_CODES=300\n")
+
+ res = invoke_and_assert(["config", "view"])
+
+ assert "PREFECT_CLIENT_RETRY_EXTRA_CODES='300'" in res.stdout
+ assert FROM_DOT_ENV in res.stdout
+
+
+def test_view_with_env_file_and_env_var(monkeypatch, tmp_path):
+ monkeypatch.setenv("PREFECT_CLIENT_RETRY_EXTRA_CODES", "400")
+
+ with tmpchdir(tmp_path):
+ with open(".env", "w") as f:
+ f.write("PREFECT_CLIENT_RETRY_EXTRA_CODES=300\n")
+
+ res = invoke_and_assert(["config", "view"])
+
+ assert "PREFECT_CLIENT_RETRY_EXTRA_CODES='400'" in res.stdout
+ assert FROM_DOT_ENV not in res.stdout
+
+
+def test_view_with_env_file_and_profile(tmp_path):
+ with tmpchdir(tmp_path):
+ with open(".env", "w") as f:
+ f.write("PREFECT_CLIENT_RETRY_EXTRA_CODES=300\n")
+
+ with prefect.context.use_profile(
+ prefect.settings.Profile(
+ name="foo",
+ settings={PREFECT_CLIENT_RETRY_EXTRA_CODES: [400]},
+ ),
+ include_current_context=False,
+ ):
+ res = invoke_and_assert(["config", "view"])
+
+ assert "PREFECT_CLIENT_RETRY_EXTRA_CODES='300'" in res.stdout
+ assert FROM_DOT_ENV in res.stdout
+
+
+def test_view_with_prefect_toml_file(tmp_path):
+ with tmpchdir(tmp_path):
+ toml_data = {"client": {"retry_extra_codes": "300"}}
+ with open("prefect.toml", "w") as f:
+ toml.dump(toml_data, f)
+
+ res = invoke_and_assert(["config", "view"])
+
+ assert "PREFECT_CLIENT_RETRY_EXTRA_CODES='300'" in res.stdout
+ assert FROM_PREFECT_TOML in res.stdout
+
+
+def test_view_with_prefect_toml_file_and_env_var(monkeypatch, tmp_path):
+ monkeypatch.setenv("PREFECT_CLIENT_RETRY_EXTRA_CODES", "400")
+
+ with tmpchdir(tmp_path):
+ toml_data = {"client": {"retry_extra_codes": "300"}}
+ with open("prefect.toml", "w") as f:
+ toml.dump(toml_data, f)
+
+ res = invoke_and_assert(["config", "view"])
+
+ assert "PREFECT_CLIENT_RETRY_EXTRA_CODES='400'" in res.stdout
+ assert FROM_PREFECT_TOML not in res.stdout
+
+
+def test_view_with_prefect_toml_file_and_profile(tmp_path):
+ with tmpchdir(tmp_path):
+ toml_data = {"client": {"retry_extra_codes": "300"}}
+ with open("prefect.toml", "w") as f:
+ toml.dump(toml_data, f)
+
+ with prefect.context.use_profile(
+ prefect.settings.Profile(
+ name="foo",
+ settings={PREFECT_CLIENT_RETRY_EXTRA_CODES: [400]},
+ ),
+ include_current_context=False,
+ ):
+ res = invoke_and_assert(["config", "view"])
+
+ assert "PREFECT_CLIENT_RETRY_EXTRA_CODES='300'" in res.stdout
+ assert FROM_PREFECT_TOML in res.stdout
+
+
+def test_view_with_pyproject_toml_file(tmp_path):
+ with tmpchdir(tmp_path):
+ toml_data = {"tool": {"prefect": {"client": {"retry_extra_codes": "300"}}}}
+ with open("pyproject.toml", "w") as f:
+ toml.dump(toml_data, f)
+
+ res = invoke_and_assert(["config", "view"])
+
+ assert "PREFECT_CLIENT_RETRY_EXTRA_CODES='300'" in res.stdout
+ assert FROM_PYPROJECT_TOML in res.stdout
+
+
+def test_view_with_pyproject_toml_file_and_env_var(monkeypatch, tmp_path):
+ monkeypatch.setenv("PREFECT_CLIENT_RETRY_EXTRA_CODES", "400")
+
+ with tmpchdir(tmp_path):
+ toml_data = {"tool": {"prefect": {"client": {"retry_extra_codes": "300"}}}}
+ with open("pyproject.toml", "w") as f:
+ toml.dump(toml_data, f)
+
+ res = invoke_and_assert(["config", "view"])
+
+ assert "PREFECT_CLIENT_RETRY_EXTRA_CODES='400'" in res.stdout
+ assert FROM_PYPROJECT_TOML not in res.stdout
+
+
+def test_view_with_pyproject_toml_file_and_profile(tmp_path):
+ with tmpchdir(tmp_path):
+ toml_data = {"tool": {"prefect": {"client": {"retry_extra_codes": "300"}}}}
+ with open("pyproject.toml", "w") as f:
+ toml.dump(toml_data, f)
+
+ with prefect.context.use_profile(
+ prefect.settings.Profile(
+ name="foo",
+ settings={PREFECT_CLIENT_RETRY_EXTRA_CODES: [400]},
+ ),
+ include_current_context=False,
+ ):
+ res = invoke_and_assert(["config", "view"])
+
+ assert "PREFECT_CLIENT_RETRY_EXTRA_CODES='300'" in res.stdout
+ assert FROM_PYPROJECT_TOML in res.stdout
diff --git a/tests/cli/test_shell.py b/tests/cli/test_shell.py
index c85db78a03b8..0a209ecb4cfc 100644
--- a/tests/cli/test_shell.py
+++ b/tests/cli/test_shell.py
@@ -1,5 +1,7 @@
+import os
from unittest.mock import AsyncMock, patch
+from prefect.cli.shell import run_shell_process
from prefect.testing.cli import invoke_and_assert
from prefect.utilities.asyncutils import run_sync_in_worker_thread
@@ -145,3 +147,33 @@ async def test_shell_runner_integration(monkeypatch):
)
runner_mock.assert_awaited_once_with(run_once=True)
+
+
+class TestRunShellProcess:
+ def test_run_shell_process_basic(self, tmp_path):
+ """Test basic command execution"""
+ test_file = tmp_path / "test.txt"
+ run_shell_process(f"touch {test_file}")
+ assert test_file.exists()
+
+ def test_run_shell_process_with_cwd(self, tmp_path):
+ """Test command execution with custom working directory"""
+ subdir = tmp_path / "subdir"
+ subdir.mkdir()
+ test_file = "test.txt"
+
+ run_shell_process(f"touch {test_file}", popen_kwargs={"cwd": str(subdir)})
+
+ assert (subdir / test_file).exists()
+
+ def test_run_shell_process_with_env(self, tmp_path):
+ """Test command execution with custom environment variables"""
+ custom_env = os.environ.copy()
+ custom_env["TEST_VAR"] = "hello"
+
+ run_shell_process(
+ "echo $TEST_VAR > output.txt",
+ popen_kwargs={"env": custom_env, "cwd": str(tmp_path)},
+ )
+
+ assert (tmp_path / "output.txt").read_text().strip() == "hello"
diff --git a/tests/client/test_prefect_client.py b/tests/client/test_prefect_client.py
index dad4fadd9b8a..867ab0fc50a2 100644
--- a/tests/client/test_prefect_client.py
+++ b/tests/client/test_prefect_client.py
@@ -3,6 +3,7 @@
from contextlib import asynccontextmanager
from datetime import timedelta
from typing import Generator, List
+from unittest import mock
from unittest.mock import ANY, MagicMock, Mock
from uuid import UUID, uuid4
@@ -55,9 +56,11 @@
Flow,
FlowRunNotificationPolicy,
FlowRunPolicy,
+ Integration,
StateType,
TaskRun,
Variable,
+ WorkerMetadata,
WorkQueue,
)
from prefect.client.schemas.responses import (
@@ -69,6 +72,7 @@
from prefect.client.utilities import inject_client
from prefect.events import AutomationCore, EventTrigger, Posture
from prefect.server.api.server import create_app
+from prefect.server.database.orm_models import WorkPool
from prefect.settings import (
PREFECT_API_DATABASE_MIGRATE_ON_START,
PREFECT_API_KEY,
@@ -2698,3 +2702,59 @@ def test_raise_for_api_version_mismatch_with_incompatible_versions(
f"Found incompatible versions: client: {client_version}, server: {api_version}. "
in str(e.value)
)
+
+
+class TestPrefectClientWorkerHeartbeat:
+ async def test_worker_heartbeat(
+ self, prefect_client: PrefectClient, work_pool: WorkPool
+ ):
+ work_pool_name = str(work_pool.name)
+ await prefect_client.send_worker_heartbeat(
+ work_pool_name=work_pool_name,
+ worker_name="test-worker",
+ heartbeat_interval_seconds=10,
+ )
+ workers = await prefect_client.read_workers_for_work_pool(work_pool_name)
+ assert len(workers) == 1
+ assert workers[0].name == "test-worker"
+ assert workers[0].heartbeat_interval_seconds == 10
+
+ async def test_worker_heartbeat_sends_metadata_if_passed(
+ self, prefect_client: PrefectClient
+ ):
+ with mock.patch(
+ "prefect.client.orchestration.PrefectHttpxAsyncClient.post",
+ return_value=httpx.Response(status_code=204),
+ ) as mock_post:
+ await prefect_client.send_worker_heartbeat(
+ work_pool_name="work-pool",
+ worker_name="test-worker",
+ heartbeat_interval_seconds=10,
+ worker_metadata=WorkerMetadata(
+ integrations=[Integration(name="prefect-aws", version="1.0.0")]
+ ),
+ )
+ assert mock_post.call_args[1]["json"] == {
+ "name": "test-worker",
+ "heartbeat_interval_seconds": 10,
+ "metadata": {
+ "integrations": [{"name": "prefect-aws", "version": "1.0.0"}]
+ },
+ }
+
+ async def test_worker_heartbeat_does_not_send_metadata_if_not_passed(
+ self, prefect_client: PrefectClient
+ ):
+ with mock.patch(
+ "prefect.client.orchestration.PrefectHttpxAsyncClient.post",
+ return_value=httpx.Response(status_code=204),
+ ) as mock_post:
+ await prefect_client.send_worker_heartbeat(
+ work_pool_name="work-pool",
+ worker_name="test-worker",
+ heartbeat_interval_seconds=10,
+ )
+ assert mock_post.call_args[1]["json"] == {
+ "name": "test-worker",
+ "heartbeat_interval_seconds": 10,
+ }
diff --git a/tests/conftest.py b/tests/conftest.py
index 74166e9b3ca9..b342d26649f3 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -51,7 +51,6 @@
PREFECT_API_SERVICES_TASK_RUN_RECORDER_ENABLED,
PREFECT_API_SERVICES_TRIGGERS_ENABLED,
PREFECT_API_URL,
- PREFECT_ASYNC_FETCH_STATE_RESULT,
PREFECT_CLI_COLORS,
PREFECT_CLI_WRAP_LINES,
PREFECT_HOME,
@@ -84,6 +83,7 @@
from .fixtures.events import *
from .fixtures.logging import *
from .fixtures.storage import *
+from .fixtures.telemetry import *
from .fixtures.time import *
@@ -323,7 +323,6 @@ def pytest_sessionstart(session):
PREFECT_CLI_COLORS: False,
PREFECT_CLI_WRAP_LINES: False,
# Enable future change
- PREFECT_ASYNC_FETCH_STATE_RESULT: True,
# Enable debug logging
PREFECT_LOGGING_LEVEL: "DEBUG",
PREFECT_LOGGING_INTERNAL_LEVEL: "DEBUG",
diff --git a/tests/deployment/test_flow_runs.py b/tests/deployment/test_flow_runs.py
index ee317cda40ab..2444d193de85 100644
--- a/tests/deployment/test_flow_runs.py
+++ b/tests/deployment/test_flow_runs.py
@@ -164,6 +164,54 @@ async def test_returns_flow_run_immediately_when_timeout_is_zero(
assert len(flow_polls.calls) == 0
assert flow_run.state.is_scheduled()
+ async def test_returns_flow_run_from_2_dot_0(
+ self,
+ test_deployment,
+ use_hosted_api_server,
+ ):
+ """
+ See https://github.com/PrefectHQ/prefect/issues/15694
+ """
+ deployment = test_deployment
+
+ mock_flowrun_response = {
+ "id": str(uuid4()),
+ "flow_id": str(uuid4()),
+ }
+
+ side_effects = [
+ Response(
+ 200, json={**mock_flowrun_response, "state": {"type": "SCHEDULED"}}
+ )
+ ]
+ side_effects.append(
+ Response(
+ 200,
+ json={
+ **mock_flowrun_response,
+ "state": {"type": "COMPLETED", "data": {"type": "unpersisted"}},
+ },
+ )
+ )
+
+ with respx.mock(
+ base_url=PREFECT_API_URL.value(),
+ assert_all_mocked=True,
+ assert_all_called=False,
+ ) as router:
+ router.get("/csrf-token", params={"client": mock.ANY}).pass_through()
+ router.get(f"/deployments/name/foo/{deployment.name}").pass_through()
+ router.post(f"/deployments/{deployment.id}/create_flow_run").pass_through()
+ router.request(
+ "GET", re.compile(PREFECT_API_URL.value() + "/flow_runs/.*")
+ ).mock(side_effect=side_effects)
+
+ flow_run = await run_deployment(
+ f"foo/{deployment.name}", timeout=None, poll_interval=0
+ )
+ assert flow_run.state.is_completed()
+ assert flow_run.state.data is None
+
async def test_polls_indefinitely(
self,
test_deployment,
diff --git a/tests/deployment/test_steps.py b/tests/deployment/test_steps.py
index 317cd0cf7288..82f41bb885c0 100644
--- a/tests/deployment/test_steps.py
+++ b/tests/deployment/test_steps.py
@@ -910,7 +910,24 @@ async def test_pip_install_reqs_with_directory_step_output_succeeds(
stdout=ANY,
)
- async def test_pip_install_fails_on_error(self):
+ async def test_pip_install_fails_on_error(self, monkeypatch):
+ open_process_mock = MagicMock(return_value=MockProcess(1))
+ monkeypatch.setattr(
+ "prefect.deployments.steps.utility.open_process",
+ open_process_mock,
+ )
+
+ mock_stream_capture = AsyncMock()
+ mock_stream_capture.side_effect = lambda *args, **kwargs: kwargs[
+ "stderr_sink"
+ ].write(
+ "ERROR: Could not open requirements file: [Errno 2] No such file or directory: 'doesnt-exist.txt'"
+ )
+
+ monkeypatch.setattr(
+ "prefect.deployments.steps.utility._stream_capture_process_output",
+ mock_stream_capture,
+ )
with pytest.raises(RuntimeError) as exc:
await run_step(
{
diff --git a/tests/events/server/test_events_api.py b/tests/events/server/test_events_api.py
index cb47e1db7ae5..d068251441b4 100644
--- a/tests/events/server/test_events_api.py
+++ b/tests/events/server/test_events_api.py
@@ -8,7 +8,7 @@
import pytest
from httpx import AsyncClient
from pendulum.datetime import DateTime
-from pydantic_core import Url
+from pydantic.networks import AnyHttpUrl
from prefect.server.events.counting import Countable, TimeUnit
from prefect.server.events.filters import (
@@ -135,7 +135,8 @@ async def test_querying_for_events_returns_first_page(
assert first_page.events == events_page_one
assert first_page.total == 123
- assert first_page.next_page == Url(
+ assert isinstance(first_page.next_page, AnyHttpUrl)
+ assert str(first_page.next_page) == (
f"http://test/api/events/filter/next?page-token={ENCODED_MOCK_PAGE_TOKEN}"
)
@@ -213,7 +214,8 @@ async def test_querying_for_subsequent_page_returns_it(
assert second_page.events == events_page_two
assert second_page.total == 123
- assert second_page.next_page == Url(
+ assert isinstance(second_page.next_page, AnyHttpUrl)
+ assert str(second_page.next_page) == (
f"http://test/api/events/filter/next?page-token={expected_token}"
)
diff --git a/tests/fixtures/telemetry.py b/tests/fixtures/telemetry.py
new file mode 100644
index 000000000000..32a509cb352b
--- /dev/null
+++ b/tests/fixtures/telemetry.py
@@ -0,0 +1,9 @@
+import pytest
+from tests.telemetry.instrumentation_tester import InstrumentationTester
+
+
+@pytest.fixture
+def instrumentation():
+ instrumentation_tester = InstrumentationTester()
+ yield instrumentation_tester
+ instrumentation_tester.reset()
diff --git a/tests/results/test_result_fetch.py b/tests/results/test_result_fetch.py
index 1e0cd8ceb871..643f2c8f165d 100644
--- a/tests/results/test_result_fetch.py
+++ b/tests/results/test_result_fetch.py
@@ -1,21 +1,6 @@
-import pytest
-
from prefect import flow, task
-from prefect.settings import PREFECT_ASYNC_FETCH_STATE_RESULT, temporary_settings
-
-
-@pytest.fixture(autouse=True)
-def disable_fetch_by_default():
- """
- The test suite defaults to the future behavior.
-
- For these tests, we enable the default user behavior.
- """
- with temporary_settings({PREFECT_ASYNC_FETCH_STATE_RESULT: False}):
- yield
-@pytest.mark.skip(reason="This test is flaky and needs to be fixed")
async def test_async_result_warnings_are_not_raised_by_engine():
# Since most of our tests are run with the opt-in globally enabled, this test
# covers a bunch of features to cover remaining cases where we may internally
@@ -92,8 +77,7 @@ async def foo():
return 1
state = await foo(return_state=True)
- with temporary_settings({PREFECT_ASYNC_FETCH_STATE_RESULT: True}):
- coro = state.result(fetch=True)
+ coro = state.result()
assert await coro == 1
diff --git a/tests/results/test_result_store.py b/tests/results/test_result_store.py
index 513d186410a8..f516af249c9d 100644
--- a/tests/results/test_result_store.py
+++ b/tests/results/test_result_store.py
@@ -16,6 +16,7 @@
PREFECT_LOCAL_STORAGE_PATH,
PREFECT_RESULTS_DEFAULT_SERIALIZER,
PREFECT_RESULTS_PERSIST_BY_DEFAULT,
+ PREFECT_TASKS_DEFAULT_PERSIST_RESULT,
temporary_settings,
)
from prefect.testing.utilities import assert_blocks_equal
@@ -440,6 +441,55 @@ def bar():
assert persist_result is True
+ with temporary_settings({PREFECT_TASKS_DEFAULT_PERSIST_RESULT: True}):
+ persist_result = bar()
+
+ assert persist_result is True
+
+
+async def test_task_can_opt_out_of_result_persistence_with_setting():
+ with temporary_settings({PREFECT_TASKS_DEFAULT_PERSIST_RESULT: True}):
+
+ @task(persist_result=False)
+ def bar():
+ return should_persist_result()
+
+ persist_result = bar()
+ assert persist_result is False
+
+ async def abar():
+ return should_persist_result()
+
+ persist_result = await abar()
+ assert persist_result is False
+
+
+async def test_can_opt_out_of_result_persistence_with_setting_when_flow_uses_feature():
+ with temporary_settings({PREFECT_TASKS_DEFAULT_PERSIST_RESULT: False}):
+
+ @flow(persist_result=True)
+ def foo():
+ return bar()
+
+ @task
+ def bar():
+ return should_persist_result()
+
+ persist_result = foo()
+
+ assert persist_result is False
+
+ @flow(persist_result=True)
+ async def afoo():
+ return await abar()
+
+ @task
+ async def abar():
+ return should_persist_result()
+
+ persist_result = await afoo()
+ assert persist_result is False
+
def test_nested_flow_custom_persist_setting():
@flow(persist_result=True)
diff --git a/tests/results/test_task_results.py b/tests/results/test_task_results.py
index bd51481f2acb..72b5193cacf4 100644
--- a/tests/results/test_task_results.py
+++ b/tests/results/test_task_results.py
@@ -1,4 +1,5 @@
from pathlib import Path
+from uuid import UUID
import pytest
@@ -262,6 +263,42 @@ def bar():
assert task_state.data.metadata.storage_key == "test"
+async def test_task_failure_is_persisted_randomly(
+ prefect_client, tmp_path, events_pipeline
+):
+ """
+    Failures should be persisted under a random key rather than the configured
+    result storage key, which can interfere with proper caching.
+ """
+ storage = LocalFileSystem(basepath=tmp_path / "test-storage")
+ await storage.save("tmp-test-storage")
+
+ @flow
+ def foo():
+ return bar(return_state=True)
+
+ @task(result_storage=storage, persist_result=True, result_storage_key="not-a-uuid")
+ def bar():
+ raise ValueError("oops I messed up")
+
+ flow_state = foo(return_state=True)
+ task_state = await flow_state.result(raise_on_failure=False)
+ assert task_state.is_failed()
+ with pytest.raises(ValueError, match="oops I messed up"):
+ await task_state.result()
+
+ assert UUID(task_state.data.metadata.storage_key)
+
+ await events_pipeline.process_events()
+
+ api_state = (
+ await prefect_client.read_task_run(task_state.state_details.task_run_id)
+ ).state
+ with pytest.raises(ValueError, match="oops I messed up"):
+ await api_state.result()
+
+ assert UUID(task_state.data.metadata.storage_key)
+
+
async def test_task_result_parameter_formatted_storage_key(
prefect_client, tmp_path, events_pipeline
):
diff --git a/tests/server/database/test_queries.py b/tests/server/database/test_queries.py
index 5cd298a94de2..b17938fb637c 100644
--- a/tests/server/database/test_queries.py
+++ b/tests/server/database/test_queries.py
@@ -48,7 +48,7 @@ async def deployment_3(self, session, flow, work_queue_2):
@pytest.fixture
async def fr_1(self, session, deployment_1):
- return await models.flow_runs.create_flow_run(
+ flow_run = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
name="fr1",
@@ -58,6 +58,7 @@ async def fr_1(self, session, deployment_1):
state=schemas.states.Scheduled(pendulum.now("UTC").subtract(minutes=2)),
),
)
+ return flow_run
@pytest.fixture
async def fr_2(self, session, deployment_2):
diff --git a/tests/server/orchestration/api/test_flow_run_graph_v2.py b/tests/server/orchestration/api/test_flow_run_graph_v2.py
index 991604a9d487..ec3487489596 100644
--- a/tests/server/orchestration/api/test_flow_run_graph_v2.py
+++ b/tests/server/orchestration/api/test_flow_run_graph_v2.py
@@ -447,6 +447,81 @@ async def test_reading_graph_for_flow_run_with_nested_tasks(
]
+@pytest.fixture
+async def nested_tasks_including_parent_with_multiple_params(
+ db: PrefectDBInterface,
+ session: AsyncSession,
+ flow_run, # db.FlowRun,
+ base_time: pendulum.DateTime,
+) -> List:
+ task_runs = []
+
+ task_runs.append(
+ db.TaskRun(
+ id=uuid4(),
+ flow_run_id=flow_run.id,
+ name="task-0",
+ task_key="task-0",
+ dynamic_key="task-0",
+ state_type=StateType.COMPLETED,
+ state_name="Irrelevant",
+ expected_start_time=base_time.add(seconds=1).subtract(microseconds=1),
+ start_time=base_time.add(seconds=1),
+ end_time=base_time.add(minutes=1, seconds=1),
+ task_inputs={
+ "first_arg": [],
+ "second_arg": [],
+ },
+ )
+ )
+
+ task_runs.append(
+ db.TaskRun(
+ id=uuid4(),
+ flow_run_id=flow_run.id,
+ name="task-1",
+ task_key="task-1",
+ dynamic_key="task-1",
+ state_type=StateType.COMPLETED,
+ state_name="Irrelevant",
+ expected_start_time=base_time.add(seconds=2).subtract(microseconds=1),
+ start_time=base_time.add(seconds=2),
+ end_time=base_time.add(minutes=1, seconds=2),
+ task_inputs={
+ "__parents__": [
+ {"id": task_runs[0].id, "input_type": "task_run"},
+ ],
+ },
+ )
+ )
+
+ session.add_all(task_runs)
+ await session.commit()
+ return task_runs
+
+
+async def test_reading_graph_nested_tasks_including_parent_with_multiple_params(
+ session: AsyncSession,
+ flow_run,
+ nested_tasks_including_parent_with_multiple_params: List,
+ base_time: pendulum.DateTime,
+):
+ graph = await read_flow_run_graph(
+ session=session,
+ flow_run_id=flow_run.id,
+ )
+
+ parent_task, child_task = nested_tasks_including_parent_with_multiple_params
+
+ # Check that the encapsulating relationships are correct and deduplicated
+ nodes_by_id = {node.id: node for _, node in graph.nodes}
+ child_node = nodes_by_id[child_task.id]
+
+ # Verify the child task has exactly one encapsulating reference to the parent
+ assert len(child_node.encapsulating) == 1
+ assert child_node.encapsulating[0].id == parent_task.id
+
+
@pytest.fixture
async def linked_tasks(
db: PrefectDBInterface,
diff --git a/tests/server/orchestration/test_core_policy.py b/tests/server/orchestration/test_core_policy.py
index a33ba36d07af..ff14b257f62a 100644
--- a/tests/server/orchestration/test_core_policy.py
+++ b/tests/server/orchestration/test_core_policy.py
@@ -550,6 +550,30 @@ async def test_stops_retrying_eventually(
assert ctx.response_status == SetStateStatus.ACCEPT
assert ctx.validated_state_type == states.StateType.FAILED
+ async def test_sets_retry_type(
+ self,
+ session,
+ initialize_orchestration,
+ ):
+ retry_policy = [RetryFailedFlows]
+ initial_state_type = states.StateType.RUNNING
+ proposed_state_type = states.StateType.FAILED
+ intended_transition = (initial_state_type, proposed_state_type)
+ ctx = await initialize_orchestration(
+ session,
+ "flow",
+ *intended_transition,
+ )
+ ctx.run.run_count = 2
+ ctx.run_settings.retries = 3
+
+ async with contextlib.AsyncExitStack() as stack:
+ for rule in retry_policy:
+ ctx = await stack.enter_async_context(rule(ctx, *intended_transition))
+ await ctx.validate_proposed_state()
+
+ assert ctx.run.empirical_policy.retry_type == "in_process"
+
class TestManualFlowRetries:
async def test_can_manual_retry_with_arbitrary_state_name(
@@ -655,6 +679,37 @@ async def test_manual_retrying_bypasses_terminal_state_protection(
assert ctx.response_status == SetStateStatus.ACCEPT
assert ctx.run.run_count == 3
+ @pytest.mark.parametrize(
+ "proposed_state_type",
+ [states.StateType.SCHEDULED, states.StateType.FAILED],
+ )
+ async def test_manual_retry_updates_retry_type(
+ self,
+ session,
+ initialize_orchestration,
+ proposed_state_type,
+ ):
+ manual_retry_policy = [HandleFlowTerminalStateTransitions]
+ initial_state_type = states.StateType.FAILED
+ intended_transition = (initial_state_type, proposed_state_type)
+ ctx = await initialize_orchestration(
+ session,
+ "flow",
+ *intended_transition,
+ )
+ ctx.proposed_state.name = "AwaitingRetry"
+ ctx.run.deployment_id = uuid4()
+ ctx.run.run_count = 2
+
+ async with contextlib.AsyncExitStack() as stack:
+ for rule in manual_retry_policy:
+ ctx = await stack.enter_async_context(rule(ctx, *intended_transition))
+
+ if proposed_state_type == states.StateType.SCHEDULED:
+ assert ctx.run.empirical_policy.retry_type == "reschedule"
+ else:
+ assert ctx.run.empirical_policy.retry_type is None
+
class TestUpdatingFlowRunTrackerOnTasks:
@pytest.mark.parametrize(
diff --git a/tests/server/services/test_task_run_recorder.py b/tests/server/services/test_task_run_recorder.py
index e373a76c972e..231594a87eba 100644
--- a/tests/server/services/test_task_run_recorder.py
+++ b/tests/server/services/test_task_run_recorder.py
@@ -1,6 +1,7 @@
import asyncio
from datetime import timedelta
from itertools import permutations
+from pathlib import Path
from typing import AsyncGenerator
from uuid import UUID
@@ -18,7 +19,7 @@
from prefect.server.schemas.core import FlowRun, TaskRunPolicy
from prefect.server.schemas.states import StateDetails, StateType
from prefect.server.services import task_run_recorder
-from prefect.server.utilities.messaging import MessageHandler
+from prefect.server.utilities.messaging import MessageHandler, create_publisher
from prefect.server.utilities.messaging.memory import MemoryMessage
@@ -29,6 +30,7 @@ async def test_start_and_stop_service():
await service.started_event.wait()
assert service.consumer_task is not None
+ assert service.consumer is not None
await service.stop()
assert service.consumer_task is None
@@ -753,3 +755,48 @@ async def test_task_run_recorder_handles_all_out_of_order_permutations(
state_types = set(state.type for state in states)
assert state_types == {StateType.PENDING, StateType.RUNNING, StateType.COMPLETED}
+
+
+async def test_task_run_recorder_sends_repeated_failed_messages_to_dead_letter(
+ pending_event: ReceivedEvent,
+ tmp_path: Path,
+):
+ """
+ Test to ensure situations like the one described in https://github.com/PrefectHQ/prefect/issues/15607
+ don't overwhelm the task run recorder.
+ """
+ pending_transition_time = pendulum.datetime(2024, 1, 1, 0, 0, 0, 0, "UTC")
+ assert pending_event.occurred == pending_transition_time
+
+ service = task_run_recorder.TaskRunRecorder()
+
+ service_task = asyncio.create_task(service.start())
+ await service.started_event.wait()
+ service.consumer.subscription.dead_letter_queue_path = tmp_path / "dlq"
+
+ async with create_publisher("events") as publisher:
+ await publisher.publish_data(
+ message(pending_event).data, message(pending_event).attributes
+ )
+ # Sending a task run event with the same task run id and timestamp but
+ # a different id will raise an error when trying to insert it into the
+ # database
+ duplicate_pending_event = pending_event.model_copy()
+ duplicate_pending_event.id = UUID("bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb")
+ await publisher.publish_data(
+ message(duplicate_pending_event).data,
+ message(duplicate_pending_event).attributes,
+ )
+
+ while not list(service.consumer.subscription.dead_letter_queue_path.glob("*")):
+ await asyncio.sleep(0.1)
+
+ assert (
+ len(list(service.consumer.subscription.dead_letter_queue_path.glob("*"))) == 1
+ )
+
+ service_task.cancel()
+ try:
+ await service_task
+ except asyncio.CancelledError:
+ pass
diff --git a/tests/server/utilities/test_messaging.py b/tests/server/utilities/test_messaging.py
index 2ee6bf8e5325..7130624655d7 100644
--- a/tests/server/utilities/test_messaging.py
+++ b/tests/server/utilities/test_messaging.py
@@ -1,5 +1,7 @@
import asyncio
import importlib
+import json
+from pathlib import Path
from typing import (
AsyncContextManager,
AsyncGenerator,
@@ -24,6 +26,12 @@
create_publisher,
ephemeral_subscription,
)
+from prefect.server.utilities.messaging.memory import (
+ Consumer as MemoryConsumer,
+)
+from prefect.server.utilities.messaging.memory import (
+ MemoryMessage,
+)
from prefect.settings import (
PREFECT_MESSAGING_BROKER,
PREFECT_MESSAGING_CACHE,
@@ -441,3 +449,52 @@ async def handler(message: Message):
# TODO: is there a way we can test that ephemeral subscriptions really have cleaned
# up after themselves after they have exited? This will differ significantly by
# each broker implementation, so it's hard to write a generic test.
+
+
+async def test_repeatedly_failed_message_is_moved_to_dead_letter_queue(
+ deduplicating_publisher: Publisher,
+ consumer: MemoryConsumer,
+ tmp_path: Path,
+):
+ captured_messages: List[Message] = []
+
+ async def handler(message: Message):
+ captured_messages.append(message)
+ raise ValueError("Simulated failure")
+
+ consumer.subscription.dead_letter_queue_path = tmp_path / "dlq"
+
+ consumer_task = asyncio.create_task(consumer.run(handler))
+
+ async with deduplicating_publisher as p:
+ await p.publish_data(
+ b"hello, world", {"howdy": "partner", "my-message-id": "A"}
+ )
+
+ while not list(consumer.subscription.dead_letter_queue_path.glob("*")):
+ await asyncio.sleep(0.1)
+
+ try:
+ consumer_task.cancel()
+ await consumer_task
+ except asyncio.CancelledError:
+ pass
+
+ # Message should have been moved to DLQ after multiple retries
+ assert len(captured_messages) == 4 # Original attempt + 3 retries
+ for message in captured_messages:
+ assert message.data == b"hello, world"
+ assert message.attributes == {"howdy": "partner", "my-message-id": "A"}
+
+ # Verify message is in DLQ
+ assert len(list(consumer.subscription.dead_letter_queue_path.glob("*"))) == 1
+ dlq_message_file = next(
+ iter(consumer.subscription.dead_letter_queue_path.glob("*"))
+ )
+ dlq_message = MemoryMessage(**json.loads(dlq_message_file.read_text()))
+ assert dlq_message.data == "hello, world"
+ assert dlq_message.attributes == {"howdy": "partner", "my-message-id": "A"}
+ assert dlq_message.retry_count > 3
+
+ remaining_message = await drain_one(consumer)
+ assert not remaining_message
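
A schematic of the retry-then-dead-letter flow these assertions depend on, assuming three retries after the initial attempt to match the counts above (the real logic lives in the memory messaging subscription):

    import json
    from pathlib import Path

    MAX_RETRIES = 3  # original attempt + 3 retries = 4 handler calls

    async def deliver(message: dict, handler, dead_letter_queue_path: Path) -> None:
        for attempt in range(1 + MAX_RETRIES):
            try:
                await handler(message)
                return
            except Exception:
                message["retry_count"] = attempt + 1
        dead_letter_queue_path.mkdir(parents=True, exist_ok=True)
        dlq_file = dead_letter_queue_path / "message.json"
        dlq_file.write_text(json.dumps(message))  # retry_count ends at 4, i.e. > 3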
diff --git a/tests/telemetry/conftest.py b/tests/telemetry/conftest.py
new file mode 100644
index 000000000000..e13c23bc2f07
--- /dev/null
+++ b/tests/telemetry/conftest.py
@@ -0,0 +1,66 @@
+from uuid import UUID
+
+import pytest
+from opentelemetry.test.globals_test import (
+ reset_logging_globals,
+ reset_metrics_globals,
+ reset_trace_globals,
+)
+
+from prefect.settings import (
+ PREFECT_API_KEY,
+ PREFECT_API_URL,
+ PREFECT_EXPERIMENTS_TELEMETRY_ENABLED,
+ temporary_settings,
+)
+
+ACCOUNT_ID = UUID("11111111-1111-1111-1111-111111111111")
+WORKSPACE_ID = UUID("22222222-2222-2222-2222-222222222222")
+
+
+@pytest.fixture
+def telemetry_account_id():
+ return ACCOUNT_ID
+
+
+@pytest.fixture
+def telemetry_workspace_id():
+ return WORKSPACE_ID
+
+
+@pytest.fixture
+def enable_telemetry(telemetry_account_id: UUID, telemetry_workspace_id: UUID):
+ with temporary_settings(
+ {
+ PREFECT_API_URL: f"https://api.prefect.cloud/api/accounts/{telemetry_account_id}/workspaces/{telemetry_workspace_id}",
+ PREFECT_API_KEY: "my-token",
+ PREFECT_EXPERIMENTS_TELEMETRY_ENABLED: True,
+ }
+ ):
+ yield
+
+
+@pytest.fixture
+def hosted_server_with_telemetry_enabled():
+ with temporary_settings(
+ {
+ PREFECT_EXPERIMENTS_TELEMETRY_ENABLED: True,
+ PREFECT_API_URL: "https://prefect.example.com/api",
+ }
+ ):
+ yield
+
+
+@pytest.fixture
+def disable_telemetry():
+ with temporary_settings({PREFECT_EXPERIMENTS_TELEMETRY_ENABLED: False}):
+ yield
+
+
+@pytest.fixture(autouse=True)
+def reset_otel_globals():
+ yield
+
+ reset_logging_globals()
+ reset_metrics_globals()
+ reset_trace_globals()
diff --git a/tests/telemetry/instrumentation_tester.py b/tests/telemetry/instrumentation_tester.py
new file mode 100644
index 000000000000..61d6f66a122d
--- /dev/null
+++ b/tests/telemetry/instrumentation_tester.py
@@ -0,0 +1,105 @@
+from typing import Any, Dict, Protocol, Tuple, Union
+
+from opentelemetry import metrics as metrics_api
+from opentelemetry import trace as trace_api
+from opentelemetry.sdk.metrics import MeterProvider
+from opentelemetry.sdk.metrics.export import InMemoryMetricReader
+from opentelemetry.sdk.trace import ReadableSpan, Span, TracerProvider, export
+from opentelemetry.sdk.trace.export.in_memory_span_exporter import (
+ InMemorySpanExporter,
+)
+from opentelemetry.test.globals_test import (
+ reset_metrics_globals,
+ reset_trace_globals,
+)
+from opentelemetry.util.types import Attributes
+
+
+def create_tracer_provider(**kwargs) -> Tuple[TracerProvider, InMemorySpanExporter]:
+ """Helper to create a configured tracer provider.
+
+    Creates and configures a `TracerProvider` with a
+    `SimpleSpanProcessor` and an `InMemorySpanExporter`.
+    All parameters passed are forwarded to the `TracerProvider`
+    constructor.
+
+    Returns:
+        A tuple with the tracer provider as the first element and the
+        in-memory span exporter as the second.
+ """
+ tracer_provider = TracerProvider(**kwargs)
+ memory_exporter = InMemorySpanExporter()
+ span_processor = export.SimpleSpanProcessor(memory_exporter)
+ tracer_provider.add_span_processor(span_processor)
+
+ return tracer_provider, memory_exporter
+
+
+def create_meter_provider(**kwargs) -> Tuple[MeterProvider, InMemoryMetricReader]:
+ """Helper to create a configured meter provider
+ Creates a `MeterProvider` and an `InMemoryMetricReader`.
+ Returns:
+ A tuple with the meter provider in the first element and the
+ in-memory metrics exporter in the second
+ """
+ memory_reader = InMemoryMetricReader()
+ metric_readers = kwargs.get("metric_readers", [])
+ metric_readers.append(memory_reader)
+ kwargs["metric_readers"] = metric_readers
+ meter_provider = MeterProvider(**kwargs)
+ return meter_provider, memory_reader
+
+
+class HasAttributesViaProperty(Protocol):
+ @property
+ def attributes(self) -> Attributes:
+ ...
+
+
+class HasAttributesViaAttr(Protocol):
+ attributes: Attributes
+
+
+HasAttributes = Union[HasAttributesViaProperty, HasAttributesViaAttr]
+
+
+class InstrumentationTester:
+ tracer_provider: TracerProvider
+ memory_exporter: InMemorySpanExporter
+ meter_provider: MeterProvider
+ memory_metrics_reader: InMemoryMetricReader
+
+ def __init__(self):
+ self.tracer_provider, self.memory_exporter = create_tracer_provider()
+ # This is done because set_tracer_provider cannot override the
+ # current tracer provider.
+ reset_trace_globals()
+ trace_api.set_tracer_provider(self.tracer_provider)
+
+ self.memory_exporter.clear()
+ # This is done because set_meter_provider cannot override the
+ # current meter provider.
+ reset_metrics_globals()
+
+ self.meter_provider, self.memory_metrics_reader = create_meter_provider()
+ metrics_api.set_meter_provider(self.meter_provider)
+
+ def reset(self):
+ reset_trace_globals()
+ reset_metrics_globals()
+
+ def get_finished_spans(self):
+ return self.memory_exporter.get_finished_spans()
+
+ @staticmethod
+ def assert_has_attributes(obj: HasAttributes, attributes: Dict[str, Any]):
+ assert obj.attributes is not None
+ for key, val in attributes.items():
+ assert key in obj.attributes
+ assert obj.attributes[key] == val
+
+ @staticmethod
+ def assert_span_instrumented_for(span: Union[Span, ReadableSpan], module):
+ assert span.instrumentation_scope is not None
+ assert span.instrumentation_scope.name == module.__name__
+ assert span.instrumentation_scope.version == module.__version__
diff --git a/tests/telemetry/test_instrumentation.py b/tests/telemetry/test_instrumentation.py
new file mode 100644
index 000000000000..a86e4f725b9e
--- /dev/null
+++ b/tests/telemetry/test_instrumentation.py
@@ -0,0 +1,162 @@
+import os
+from uuid import UUID
+
+import pytest
+from opentelemetry import metrics, trace
+from opentelemetry._logs._internal import get_logger_provider
+from opentelemetry.exporter.otlp.proto.http._log_exporter import OTLPLogExporter
+from opentelemetry.exporter.otlp.proto.http.metric_exporter import OTLPMetricExporter
+from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler
+from opentelemetry.sdk._logs.export import SimpleLogRecordProcessor
+from opentelemetry.sdk.metrics import MeterProvider
+from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader
+from opentelemetry.sdk.trace import TracerProvider
+
+from prefect.telemetry.bootstrap import setup_telemetry
+from prefect.telemetry.instrumentation import extract_account_and_workspace_id
+from prefect.telemetry.logging import get_log_handler
+from prefect.telemetry.processors import InFlightSpanProcessor
+
+
+def test_extract_account_and_workspace_id_valid_url(
+ telemetry_account_id: UUID, telemetry_workspace_id: UUID
+):
+ url = (
+ f"https://api.prefect.cloud/api/accounts/{telemetry_account_id}/"
+ f"workspaces/{telemetry_workspace_id}"
+ )
+ account_id, workspace_id = extract_account_and_workspace_id(url)
+ assert account_id == telemetry_account_id
+ assert workspace_id == telemetry_workspace_id
+
+
+@pytest.mark.parametrize(
+ "url",
+ [
+ "https://api.prefect.cloud/api/invalid",
+ "https://api.prefect.cloud/api/accounts/invalid-uuid/workspaces/22222222-2222-2222-2222-222222222222",
+ "https://api.prefect.cloud/api/accounts/22222222-2222-2222-2222-222222222222/invalid",
+ "https://api.prefect.cloud/api/workspaces/22222222-2222-2222-2222-222222222222",
+ ],
+)
+def test_extract_account_and_workspace_id_invalid_urls(url):
+ with pytest.raises(
+ ValueError,
+ match=f"Could not extract account and workspace id from API url: {url!r}",
+ ):
+ extract_account_and_workspace_id(url)
+
+
+def test_telemetry_disabled(disable_telemetry):
+ trace_provider, meter_provider, logger_provider = setup_telemetry()
+
+ assert trace_provider is None
+ assert meter_provider is None
+ assert logger_provider is None
+
+
+def test_non_cloud_server(hosted_server_with_telemetry_enabled):
+ trace_provider, meter_provider, logger_provider = setup_telemetry()
+
+ assert trace_provider is None
+ assert meter_provider is None
+ assert logger_provider is None
+
+
+def test_trace_provider(
+ enable_telemetry: None, telemetry_account_id: UUID, telemetry_workspace_id: UUID
+):
+ trace_provider, _, _ = setup_telemetry()
+
+ assert isinstance(trace_provider, TracerProvider)
+
+ resource_attributes = {
+ k: v
+ for k, v in trace_provider.resource.attributes.items()
+ if not k.startswith("telemetry.sdk")
+ }
+
+ assert resource_attributes == {
+ "service.name": "prefect",
+ "service.instance.id": os.uname().nodename,
+ "prefect.account": str(telemetry_account_id),
+ "prefect.workspace": str(telemetry_workspace_id),
+ }
+
+ span_processor = trace_provider._active_span_processor._span_processors[0]
+
+ assert isinstance(span_processor, InFlightSpanProcessor)
+ assert (
+ span_processor.span_exporter._endpoint # type: ignore
+ == (
+ f"https://api.prefect.cloud/api/accounts/{telemetry_account_id}/"
+ f"workspaces/{telemetry_workspace_id}/telemetry/v1/traces"
+ )
+ )
+
+ assert trace.get_tracer_provider() == trace_provider
+
+
+def test_meter_provider(
+ enable_telemetry: None, telemetry_account_id: UUID, telemetry_workspace_id: UUID
+):
+ _, meter_provider, _ = setup_telemetry()
+ assert isinstance(meter_provider, MeterProvider)
+
+ metric_reader = list(meter_provider._all_metric_readers)[0]
+ exporter = metric_reader._exporter
+ assert isinstance(metric_reader, PeriodicExportingMetricReader)
+ assert isinstance(exporter, OTLPMetricExporter)
+
+ resource_attributes = {
+ k: v
+ for k, v in meter_provider._sdk_config.resource.attributes.items()
+ if not k.startswith("telemetry.sdk")
+ }
+
+ assert resource_attributes == {
+ "service.name": "prefect",
+ "service.instance.id": os.uname().nodename,
+ "prefect.account": str(telemetry_account_id),
+ "prefect.workspace": str(telemetry_workspace_id),
+ }
+
+ assert (
+ metric_reader._exporter._endpoint # type: ignore
+ == (
+ f"https://api.prefect.cloud/api/accounts/{telemetry_account_id}/"
+ f"workspaces/{telemetry_workspace_id}/telemetry/v1/metrics"
+ )
+ )
+
+ assert metrics.get_meter_provider() == meter_provider
+
+
+def test_logger_provider(
+ enable_telemetry: None, telemetry_account_id: UUID, telemetry_workspace_id: UUID
+):
+ _, _, logger_provider = setup_telemetry()
+
+ assert isinstance(logger_provider, LoggerProvider)
+
+ processor = list(
+ logger_provider._multi_log_record_processor._log_record_processors
+ )[0]
+ exporter = processor._exporter # type: ignore
+
+ assert isinstance(processor, SimpleLogRecordProcessor)
+ assert isinstance(exporter, OTLPLogExporter)
+
+ assert (
+ exporter._endpoint # type: ignore
+ == (
+ f"https://api.prefect.cloud/api/accounts/{telemetry_account_id}/"
+ f"workspaces/{telemetry_workspace_id}/telemetry/v1/logs"
+ )
+ )
+
+ assert get_logger_provider() == logger_provider
+
+ log_handler = get_log_handler()
+ assert isinstance(log_handler, LoggingHandler)
+ assert log_handler._logger_provider == logger_provider
diff --git a/tests/telemetry/test_logging.py b/tests/telemetry/test_logging.py
new file mode 100644
index 000000000000..265029c54cda
--- /dev/null
+++ b/tests/telemetry/test_logging.py
@@ -0,0 +1,31 @@
+import logging
+
+from prefect.telemetry.bootstrap import setup_telemetry
+from prefect.telemetry.logging import (
+ add_telemetry_log_handler,
+ get_log_handler,
+ set_log_handler,
+)
+
+
+def test_add_telemetry_log_handler_with_handler(enable_telemetry):
+ logger = logging.getLogger("test")
+ initial_handlers = list(logger.handlers)
+
+ setup_telemetry()
+
+ handler = get_log_handler()
+ assert handler is not None
+
+ add_telemetry_log_handler(logger)
+ assert list(logger.handlers) == initial_handlers + [handler]
+
+
+def test_add_telemetry_log_handler_without_handler():
+ logger = logging.getLogger("test")
+ initial_handlers = list(logger.handlers)
+
+ set_log_handler(None)
+
+ add_telemetry_log_handler(logger)
+ assert list(logger.handlers) == initial_handlers
diff --git a/tests/telemetry/test_processors.py b/tests/telemetry/test_processors.py
new file mode 100644
index 000000000000..2a91214bb8c1
--- /dev/null
+++ b/tests/telemetry/test_processors.py
@@ -0,0 +1,144 @@
+from unittest.mock import Mock, patch
+
+import pytest
+from opentelemetry.sdk.trace import ReadableSpan, Span
+from opentelemetry.trace import SpanContext, TraceFlags
+
+from prefect.telemetry.processors import InFlightSpanProcessor
+
+
+@pytest.fixture
+def mock_span_exporter():
+ return Mock()
+
+
+@pytest.fixture
+def mock_span():
+ span = Mock(spec=Span)
+ span.context = Mock(
+ spec=SpanContext, span_id=123, trace_flags=TraceFlags(TraceFlags.SAMPLED)
+ )
+ mock_readable = Mock(spec=ReadableSpan)
+ mock_readable._attributes = {}
+ span._readable_span.return_value = mock_readable
+ return span
+
+
+@pytest.fixture
+def processor(mock_span_exporter: Mock):
+ return InFlightSpanProcessor(mock_span_exporter)
+
+
+class TestInFlightSpanProcessor:
+ def test_initialization(
+ self, processor: InFlightSpanProcessor, mock_span_exporter: Mock
+ ):
+ assert processor.span_exporter == mock_span_exporter
+ assert processor._in_flight == {}
+ assert not processor._stop_event.is_set()
+ assert processor._export_thread.daemon
+ assert processor._export_thread.is_alive()
+
+ def test_span_processing_lifecycle(
+ self,
+ processor: InFlightSpanProcessor,
+ mock_span: Mock,
+ ):
+ processor.on_start(mock_span)
+ assert mock_span.context.span_id in processor._in_flight
+ assert processor._in_flight[mock_span.context.span_id] == mock_span
+
+ readable_span = Mock(spec=ReadableSpan)
+ readable_span.context = mock_span.context
+ processor.on_end(readable_span)
+
+ assert mock_span.context.span_id not in processor._in_flight
+ processor.span_exporter.export.assert_called_once_with((readable_span,))
+
+ def test_unsampled_span_ignored(self, processor: InFlightSpanProcessor):
+ unsampled_span = Mock(spec=Span)
+ unsampled_span.context = Mock(
+ spec=SpanContext, trace_flags=TraceFlags(TraceFlags.DEFAULT)
+ )
+
+ processor.on_start(unsampled_span)
+ assert len(processor._in_flight) == 0
+
+ processor.on_end(unsampled_span)
+ processor.span_exporter.export.assert_not_called()
+
+ def test_periodic_export(self, mock_span_exporter: Mock, mock_span: Mock):
+ with patch("time.sleep"):
+ processor = InFlightSpanProcessor(mock_span_exporter)
+ processor.on_start(mock_span)
+
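+            # Mirror one pass of the processor's background export loop:
+            # snapshot all in-flight spans and export them mid-flight.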
+ with processor._lock:
+ to_export = [
+ processor._readable_span(span)
+ for span in processor._in_flight.values()
+ ]
+ if to_export:
+ processor.span_exporter.export(to_export)
+
+ assert mock_span_exporter.export.called
+ exported_spans = mock_span_exporter.export.call_args[0][0]
+ assert len(exported_spans) == 1
+ assert exported_spans[0]._attributes["prefect.in-flight"] is True
+
+ processor.shutdown()
+
+ def test_concurrent_spans(self, processor: InFlightSpanProcessor):
+ spans = [
+ Mock(
+ spec=Span,
+ context=Mock(
+ spec=SpanContext,
+ span_id=i,
+ trace_flags=TraceFlags(TraceFlags.SAMPLED),
+ ),
+ )
+ for i in range(3)
+ ]
+
+ for span in spans:
+ processor.on_start(span)
+
+ assert len(processor._in_flight) == 3
+
+ for span in reversed(spans):
+ readable_span = Mock(spec=ReadableSpan)
+ readable_span.context = span.context
+ processor.on_end(readable_span)
+
+ assert len(processor._in_flight) == 0
+
+ def test_shutdown(self, processor: InFlightSpanProcessor):
+ processor.shutdown()
+
+ assert processor._stop_event.is_set()
+ assert not processor._export_thread.is_alive()
+ processor.span_exporter.shutdown.assert_called_once()
+
+ def test_span_without_context(self, processor: InFlightSpanProcessor):
+ span_without_context = Mock(spec=Span)
+ span_without_context.context = None
+
+ processor.on_start(span_without_context)
+ assert len(processor._in_flight) == 0
+
+ processor.on_end(span_without_context)
+ processor.span_exporter.export.assert_not_called()
+
+ def test_readable_span_attributes(
+ self, processor: InFlightSpanProcessor, mock_span: Mock
+ ):
+ readable = processor._readable_span(mock_span)
+
+ assert readable._attributes
+ assert "prefect.in-flight" in readable._attributes
+ assert readable._attributes["prefect.in-flight"] is True
+ assert isinstance(readable._end_time, int)
+
+ def test_force_flush(self, processor: InFlightSpanProcessor):
+ assert processor.force_flush() is True
+ assert processor.force_flush(timeout_millis=100) is True
diff --git a/tests/test_flow_engine.py b/tests/test_flow_engine.py
index 4335f092cf96..db0016756aa7 100644
--- a/tests/test_flow_engine.py
+++ b/tests/test_flow_engine.py
@@ -10,11 +10,13 @@
import anyio
import pydantic
import pytest
+from opentelemetry import trace
+import prefect
from prefect import Flow, __development_base_path__, flow, task
from prefect.client.orchestration import PrefectClient, SyncPrefectClient
from prefect.client.schemas.filters import FlowFilter, FlowRunFilter
-from prefect.client.schemas.objects import StateType
+from prefect.client.schemas.objects import FlowRun, StateType
from prefect.client.schemas.sorting import FlowRunSort
from prefect.concurrency.asyncio import concurrency as aconcurrency
from prefect.concurrency.sync import concurrency
@@ -43,9 +45,12 @@
from prefect.server.schemas.core import ConcurrencyLimitV2
from prefect.server.schemas.core import FlowRun as ServerFlowRun
from prefect.testing.utilities import AsyncMock
+from prefect.types import KeyValueLabels
from prefect.utilities.callables import get_call_parameters
from prefect.utilities.filesystem import tmpchdir
+from .telemetry.instrumentation_tester import InstrumentationTester
+
@flow
async def foo():
@@ -1802,3 +1807,253 @@ async def expensive_flow():
concurrency_limit_v2.name
)
assert response.active_slots == 0
+
+
+class TestFlowRunInstrumentation:
+ def test_flow_run_instrumentation(self, instrumentation: InstrumentationTester):
+ @flow
+ def instrumented_flow():
+ from prefect.states import Completed
+
+ return Completed(message="The flow is with you")
+
+ instrumented_flow()
+
+ spans = instrumentation.get_finished_spans()
+ assert len(spans) == 1
+ span = spans[0]
+ assert span is not None
+ instrumentation.assert_span_instrumented_for(span, prefect)
+
+ instrumentation.assert_has_attributes(
+ span,
+ {
+ "prefect.run.type": "flow",
+ "prefect.tags": (),
+ "prefect.flow.name": "instrumented-flow",
+ "prefect.run.id": mock.ANY,
+ },
+ )
+ assert span.status.status_code == trace.StatusCode.OK
+
+ assert len(span.events) == 2
+ assert span.events[0].name == "Running"
+ instrumentation.assert_has_attributes(
+ span.events[0],
+ {
+ "prefect.state.message": "",
+ "prefect.state.type": StateType.RUNNING,
+ "prefect.state.name": "Running",
+ "prefect.state.id": mock.ANY,
+ },
+ )
+
+ assert span.events[1].name == "Completed"
+ instrumentation.assert_has_attributes(
+ span.events[1],
+ {
+ "prefect.state.message": "The flow is with you",
+ "prefect.state.type": StateType.COMPLETED,
+ "prefect.state.name": "Completed",
+ "prefect.state.id": mock.ANY,
+ },
+ )
+
+ def test_flow_run_instrumentation_captures_tags(
+ self,
+ instrumentation: InstrumentationTester,
+ ):
+ from prefect import tags
+
+ @flow
+ def instrumented_flow():
+ pass
+
+ with tags("foo", "bar"):
+ instrumented_flow()
+
+ spans = instrumentation.get_finished_spans()
+ assert len(spans) == 1
+ span = spans[0]
+ assert span is not None
+ instrumentation.assert_span_instrumented_for(span, prefect)
+
+ instrumentation.assert_has_attributes(
+ span,
+ {
+ "prefect.run.type": "flow",
+ "prefect.flow.name": "instrumented-flow",
+ "prefect.run.id": mock.ANY,
+ },
+ )
+        # list-like span attributes are serialized to tuples; their order appears nondeterministic, so compare as a set to avoid flakes
+ assert set(span.attributes.get("prefect.tags")) == {"foo", "bar"} # type: ignore
+ assert span.status.status_code == trace.StatusCode.OK
+
+ def test_flow_run_instrumentation_captures_labels(
+ self, instrumentation: InstrumentationTester, monkeypatch
+ ):
+ # simulate server responding with labels on flow run
+ class FlowRunWithLabels(FlowRun):
+ labels: KeyValueLabels = pydantic.Field(
+ default_factory=lambda: {
+ "prefect.deployment.id": "some-id",
+ "my-label": "my-value",
+ }
+ )
+
+ monkeypatch.setattr(
+ "prefect.client.orchestration.FlowRun",
+ FlowRunWithLabels,
+ )
+
+ @flow
+ def instrumented_flow():
+ pass
+
+ instrumented_flow()
+
+ spans = instrumentation.get_finished_spans()
+ assert len(spans) == 1
+ span = spans[0]
+ assert span is not None
+
+ instrumentation.assert_has_attributes(
+ span,
+ {
+ "prefect.run.type": "flow",
+ "prefect.flow.name": "instrumented-flow",
+ "prefect.run.id": mock.ANY,
+ "prefect.deployment.id": "some-id",
+ "my-label": "my-value",
+ },
+ )
+
+ def test_flow_run_instrumentation_on_exception(
+ self, instrumentation: InstrumentationTester
+ ):
+ @flow
+ def a_broken_flow():
+ raise Exception("This flow broke!")
+
+ with pytest.raises(Exception):
+ a_broken_flow()
+
+ spans = instrumentation.get_finished_spans()
+ assert len(spans) == 1
+ span = spans[0]
+ assert span is not None
+ instrumentation.assert_span_instrumented_for(span, prefect)
+
+ instrumentation.assert_has_attributes(
+ span,
+ {
+ "prefect.run.type": "flow",
+ "prefect.tags": (),
+ "prefect.flow.name": "a-broken-flow",
+ "prefect.run.id": mock.ANY,
+ },
+ )
+
+ assert span.status.status_code == trace.StatusCode.ERROR
+ assert (
+ span.status.description
+ == "Flow run encountered an exception: Exception: This flow broke!"
+ )
+
+ assert len(span.events) == 3
+ assert span.events[0].name == "Running"
+ instrumentation.assert_has_attributes(
+ span.events[0],
+ {
+ "prefect.state.message": "",
+ "prefect.state.type": StateType.RUNNING,
+ "prefect.state.name": "Running",
+ "prefect.state.id": mock.ANY,
+ },
+ )
+
+ assert span.events[1].name == "Failed"
+ instrumentation.assert_has_attributes(
+ span.events[1],
+ {
+ "prefect.state.message": "Flow run encountered an exception: Exception: This flow broke!",
+ "prefect.state.type": StateType.FAILED,
+ "prefect.state.name": "Failed",
+ "prefect.state.id": mock.ANY,
+ },
+ )
+
+ assert span.events[2].name == "exception"
+ instrumentation.assert_has_attributes(
+ span.events[2],
+ {
+ "exception.type": "Exception",
+ "exception.message": "This flow broke!",
+ "exception.stacktrace": mock.ANY,
+ "exception.escaped": "False",
+ },
+ )
+
+ def test_flow_run_instrumentation_on_timeout(
+ self, instrumentation: InstrumentationTester
+ ):
+ @flow(timeout_seconds=0.1)
+ def a_slow_flow():
+ time.sleep(1)
+
+ with pytest.raises(TimeoutError):
+ a_slow_flow()
+
+ spans = instrumentation.get_finished_spans()
+ assert len(spans) == 1
+ span = spans[0]
+ assert span is not None
+ instrumentation.assert_span_instrumented_for(span, prefect)
+
+ instrumentation.assert_has_attributes(
+ span,
+ {
+ "prefect.run.type": "flow",
+ "prefect.tags": (),
+ "prefect.flow.name": "a-slow-flow",
+ "prefect.run.id": mock.ANY,
+ },
+ )
+
+ assert span.status.status_code == trace.StatusCode.ERROR
+ assert span.status.description == "Flow run exceeded timeout of 0.1 second(s)"
+
+ assert len(span.events) == 3
+ assert span.events[0].name == "Running"
+ instrumentation.assert_has_attributes(
+ span.events[0],
+ {
+ "prefect.state.message": "",
+ "prefect.state.type": StateType.RUNNING,
+ "prefect.state.name": "Running",
+ "prefect.state.id": mock.ANY,
+ },
+ )
+
+ assert span.events[1].name == "TimedOut"
+ instrumentation.assert_has_attributes(
+ span.events[1],
+ {
+ "prefect.state.message": "Flow run exceeded timeout of 0.1 second(s)",
+ "prefect.state.type": StateType.FAILED,
+ "prefect.state.name": "TimedOut",
+ "prefect.state.id": mock.ANY,
+ },
+ )
+
+ assert span.events[2].name == "exception"
+ instrumentation.assert_has_attributes(
+ span.events[2],
+ {
+ "exception.type": "prefect.flow_engine.FlowRunTimeoutError",
+ "exception.message": "Scope timed out after 0.1 second(s).",
+ "exception.stacktrace": mock.ANY,
+ "exception.escaped": "False",
+ },
+ )
diff --git a/tests/test_logging.py b/tests/test_logging.py
index 7248dc004a64..d11cf2207d1e 100644
--- a/tests/test_logging.py
+++ b/tests/test_logging.py
@@ -6,6 +6,8 @@
from contextlib import nullcontext
from functools import partial
from io import StringIO
+from typing import Type
+from unittest import mock
from unittest.mock import ANY, MagicMock
import pendulum
@@ -30,7 +32,12 @@
)
from prefect.logging.filters import ObfuscateApiKeyFilter
from prefect.logging.formatters import JsonFormatter
-from prefect.logging.handlers import APILogHandler, APILogWorker, PrefectConsoleHandler
+from prefect.logging.handlers import (
+ APILogHandler,
+ APILogWorker,
+ PrefectConsoleHandler,
+ WorkerAPILogHandler,
+)
from prefect.logging.highlighters import PrefectConsoleHighlighter
from prefect.logging.loggers import (
PrefectLogAdapter,
@@ -39,6 +46,7 @@
flow_run_logger,
get_logger,
get_run_logger,
+ get_worker_logger,
patch_print,
task_run_logger,
)
@@ -60,6 +68,7 @@
from prefect.testing.cli import temporary_console_width
from prefect.testing.utilities import AsyncMock
from prefect.utilities.names import obfuscate
+from prefect.workers.base import BaseJobConfiguration, BaseWorker
@pytest.fixture
@@ -627,6 +636,118 @@ def test_handler_knows_how_large_logs_are(self):
assert handler._get_payload_size(dict_log) == log_size
+WORKER_ID = uuid.uuid4()
+
+
+class TestWorkerLogging:
+ class CloudWorkerTestImpl(BaseWorker):
+ type: str = "cloud_logging_test"
+ job_configuration: Type[BaseJobConfiguration] = BaseJobConfiguration
+
+ async def _send_worker_heartbeat(self, *_, **__):
+ """
+ Workers only return an ID here if they're connected to Cloud,
+ so this simulates the worker being connected to Cloud.
+ """
+ return WORKER_ID
+
+ async def run(self, *_, **__):
+ pass
+
+ class ServerWorkerTestImpl(BaseWorker):
+ type: str = "server_logging_test"
+ job_configuration: Type[BaseJobConfiguration] = BaseJobConfiguration
+
+ async def run(self, *_, **__):
+ pass
+
+ async def _send_worker_heartbeat(self, *_, **__):
+ """
+ Workers only return an ID here if they're connected to Cloud,
+ so this simulates the worker not being connected to Cloud.
+ """
+ return None
+
+ @pytest.fixture
+ def logging_to_api_enabled(self):
+ with temporary_settings(updates={PREFECT_LOGGING_TO_API_ENABLED: True}):
+ yield
+
+ @pytest.fixture
+ def worker_handler(self):
+ yield WorkerAPILogHandler()
+
+ @pytest.fixture
+ def logger(self, worker_handler):
+ logger = logging.getLogger(__name__)
+ logger.setLevel(logging.DEBUG)
+ logger.addHandler(worker_handler)
+ yield logger
+ logger.removeHandler(worker_handler)
+
+ async def test_get_worker_logger_works_with_no_backend_id(self):
+ async with self.CloudWorkerTestImpl(
+ name="test", work_pool_name="test-work-pool"
+ ) as worker:
+ logger = get_worker_logger(worker)
+ assert logger.name == "prefect.workers.cloud_logging_test.test"
+
+ async def test_get_worker_logger_works_with_backend_id(self):
+ async with self.CloudWorkerTestImpl(
+ name="test", work_pool_name="test-work-pool"
+ ) as worker:
+ await worker.sync_with_backend()
+ logger = get_worker_logger(worker)
+ assert logger.name == "prefect.workers.cloud_logging_test.test"
+ assert logger.extra["worker_id"] == str(WORKER_ID)
+
+ async def test_worker_emits_logs_with_worker_id(self, caplog):
+ async with self.CloudWorkerTestImpl(
+ name="test", work_pool_name="test-work-pool"
+ ) as worker:
+ await worker.sync_with_backend()
+ worker._logger.info("testing_with_extras")
+
+ record_with_extras = [
+ r for r in caplog.records if "testing_with_extras" in r.message
+ ]
+
+ assert "testing_with_extras" in caplog.text
+ assert record_with_extras[0].worker_id == str(worker.backend_id)
+ assert worker._logger.extra["worker_id"] == str(worker.backend_id)
+
+ async def test_worker_logger_sends_log_to_api_worker_when_connected_to_cloud(
+ self, mock_log_worker, worker_handler, logging_to_api_enabled
+ ):
+ async with self.CloudWorkerTestImpl(
+ name="test", work_pool_name="test-work-pool"
+ ) as worker:
+ await worker.sync_with_backend()
+ worker._logger.debug("test-worker-log")
+
+ log_statement = [
+ log
+ for call in mock_log_worker.instance().send.call_args_list
+ for log in call.args
+ if log["name"] == worker._logger.name
+ and log["message"] == "test-worker-log"
+ ]
+
+ assert len(log_statement) == 1
+ assert log_statement[0]["worker_id"] == str(worker.backend_id)
+
+ async def test_worker_logger_does_not_send_logs_when_not_connected_to_cloud(
+ self, mock_log_worker, worker_handler, logging_to_api_enabled
+ ):
+ async with self.ServerWorkerTestImpl(
+ name="test", work_pool_name="test-work-pool"
+ ) as worker:
+ assert isinstance(worker._logger, logging.Logger)
+ worker._logger.debug("test-worker-log")
+
+ mock_log_worker.instance().send.assert_not_called()
+
+
class TestAPILogWorker:
@pytest.fixture
async def worker(self):
@@ -746,6 +867,22 @@ async def test_logs_are_sent_immediately_when_flushed(
logs = await prefect_client.read_logs()
assert len(logs) == 2
+ async def test_logs_include_worker_id_if_available(
+ self, worker, log_dict, prefect_client
+ ):
+ worker_id = str(uuid.uuid4())
+ log_dict["worker_id"] = worker_id
+
+ with mock.patch(
+ "prefect.client.orchestration.PrefectClient.create_logs", autospec=True
+ ) as mock_create_logs:
+ worker.send(log_dict)
+ await worker.drain()
+ assert mock_create_logs.call_count == 1
+ logs = mock_create_logs.call_args.args[1]
+ assert len(logs) == 1
+ assert logs[0]["worker_id"] == worker_id
+
def test_flow_run_logger(flow_run):
logger = flow_run_logger(flow_run)
diff --git a/tests/test_settings.py b/tests/test_settings.py
index 740047c0aa89..ff7163c0b481 100644
--- a/tests/test_settings.py
+++ b/tests/test_settings.py
@@ -51,6 +51,7 @@
save_profiles,
temporary_settings,
)
+from prefect.settings.base import _to_environment_variable_value
from prefect.settings.constants import DEFAULT_PROFILES_PATH
from prefect.settings.legacy import (
_env_var_to_accessor,
@@ -58,6 +59,7 @@
_get_valid_setting_names,
)
from prefect.settings.models.api import APISettings
+from prefect.settings.models.client import ClientSettings
from prefect.settings.models.logging import LoggingSettings
from prefect.settings.models.server import ServerSettings
from prefect.settings.models.server.api import ServerAPISettings
@@ -178,7 +180,6 @@
"PREFECT_API_TASK_CACHE_KEY_MAX_LENGTH": {"test_value": 10, "legacy": True},
"PREFECT_API_TLS_INSECURE_SKIP_VERIFY": {"test_value": True},
"PREFECT_API_URL": {"test_value": "https://api.prefect.io"},
- "PREFECT_ASYNC_FETCH_STATE_RESULT": {"test_value": True},
"PREFECT_CLIENT_CSRF_SUPPORT_ENABLED": {"test_value": True},
"PREFECT_CLIENT_ENABLE_METRICS": {"test_value": True, "legacy": True},
"PREFECT_CLIENT_MAX_RETRIES": {"test_value": 3},
@@ -186,7 +187,7 @@
"test_value": True,
},
"PREFECT_CLIENT_METRICS_PORT": {"test_value": 9000},
- "PREFECT_CLIENT_RETRY_EXTRA_CODES": {"test_value": "400"},
+ "PREFECT_CLIENT_RETRY_EXTRA_CODES": {"test_value": {400, 300}},
"PREFECT_CLIENT_RETRY_JITTER_FACTOR": {"test_value": 0.5},
"PREFECT_CLI_COLORS": {"test_value": True},
"PREFECT_CLI_PROMPT": {"test_value": True},
@@ -227,7 +228,9 @@
"legacy": True,
},
"PREFECT_EVENTS_WEBSOCKET_BACKFILL_PAGE_SIZE": {"test_value": 10, "legacy": True},
- "PREFECT_EXPERIMENTAL_WARN": {"test_value": True},
+ "PREFECT_EXPERIMENTAL_WARN": {"test_value": True, "legacy": True},
+ "PREFECT_EXPERIMENTS_TELEMETRY_ENABLED": {"test_value": False},
+ "PREFECT_EXPERIMENTS_WARN": {"test_value": True},
"PREFECT_FLOW_DEFAULT_RETRIES": {"test_value": 10, "legacy": True},
"PREFECT_FLOWS_DEFAULT_RETRIES": {"test_value": 10},
"PREFECT_FLOW_DEFAULT_RETRY_DELAY_SECONDS": {"test_value": 10, "legacy": True},
@@ -240,7 +243,7 @@
},
"PREFECT_LOGGING_COLORS": {"test_value": True},
"PREFECT_LOGGING_CONFIG_PATH": {"test_value": Path("/path/to/settings.yaml")},
- "PREFECT_LOGGING_EXTRA_LOGGERS": {"test_value": "foo"},
+ "PREFECT_LOGGING_EXTRA_LOGGERS": {"test_value": ["foo", "bar"]},
"PREFECT_LOGGING_INTERNAL_LEVEL": {"test_value": "INFO", "legacy": True},
"PREFECT_LOGGING_LEVEL": {"test_value": "INFO"},
"PREFECT_LOGGING_LOG_PRINTS": {"test_value": True},
@@ -388,6 +391,7 @@
"PREFECT_SILENCE_API_URL_MISCONFIGURATION": {"test_value": True},
"PREFECT_SQLALCHEMY_MAX_OVERFLOW": {"test_value": 10, "legacy": True},
"PREFECT_SQLALCHEMY_POOL_SIZE": {"test_value": 10, "legacy": True},
+ "PREFECT_TASKS_DEFAULT_PERSIST_RESULT": {"test_value": True},
"PREFECT_TASKS_DEFAULT_RETRIES": {"test_value": 10},
"PREFECT_TASKS_DEFAULT_RETRY_DELAY_SECONDS": {"test_value": 10},
"PREFECT_TASKS_REFRESH_CACHE": {"test_value": True},
@@ -453,6 +457,23 @@ def _create_temp_env(content):
env_file.unlink()
+@pytest.fixture
+def temporary_toml_file(tmp_path):
+ with tmpchdir(tmp_path):
+ toml_file = Path("prefect.toml")
+
+ def _create_temp_toml(content, path=toml_file):
+ nonlocal toml_file
+ with path.open("w") as f:
+ toml.dump(content, f)
+ toml_file = path # update toml_file in case path was changed
+
+ yield _create_temp_toml
+
+ if toml_file.exists():
+ toml_file.unlink()
+
+
class TestSettingClass:
def test_setting_equality_with_value(self):
with temporary_settings({PREFECT_TEST_SETTING: "foo"}):
@@ -516,6 +537,15 @@ def test_settings_copy_with_update(self):
), "Changed, existing value was default"
assert new_settings.client.retry_extra_codes == {400, 500}
+ def test_settings_copy_with_update_restore_defaults(self, monkeypatch):
+ monkeypatch.setenv("PREFECT_TESTING_TEST_SETTING", "Not the default")
+ settings = Settings()
+ assert settings.testing.test_setting == "Not the default"
+ new_settings = settings.copy_with_update(
+ restore_defaults={PREFECT_TEST_SETTING},
+ )
+ assert new_settings.testing.test_setting == "FOO"
+
def test_settings_loads_environment_variables_at_instantiation(self, monkeypatch):
assert PREFECT_TEST_MODE.value() is True
@@ -560,6 +590,30 @@ def test_settings_to_environment_casts_to_strings(self):
).to_environment_variables()["PREFECT_SERVER_API_PORT"]
== "3000"
)
+ assert (
+ Settings(
+ logging=LoggingSettings(extra_loggers="foo")
+ ).to_environment_variables()["PREFECT_LOGGING_EXTRA_LOGGERS"]
+ == "foo"
+ )
+ assert Settings(
+ logging=LoggingSettings(extra_loggers=["foo", "bar"])
+ ).to_environment_variables()["PREFECT_LOGGING_EXTRA_LOGGERS"] in (
+ "foo,bar",
+ "bar,foo",
+ )
+ assert (
+ Settings(
+ client=ClientSettings(retry_extra_codes=300)
+ ).to_environment_variables()["PREFECT_CLIENT_RETRY_EXTRA_CODES"]
+ == "300"
+ )
+ assert Settings(
+ client=ClientSettings(retry_extra_codes={300, 400})
+ ).to_environment_variables()["PREFECT_CLIENT_RETRY_EXTRA_CODES"] in (
+ "300,400",
+ "400,300",
+ )
@pytest.mark.parametrize("exclude_unset", [True, False])
def test_settings_to_environment_roundtrip(self, exclude_unset, monkeypatch):
@@ -571,8 +625,8 @@ def test_settings_to_environment_roundtrip(self, exclude_unset, monkeypatch):
assert settings.model_dump() == new_settings.model_dump()
def test_settings_hash_key(self):
- settings = Settings(testing=dict(test_mode=True))
- diff_settings = Settings(testing=dict(test_mode=False))
+ settings = Settings(testing=dict(test_mode=True)) # type: ignore
+ diff_settings = Settings(testing=dict(test_mode=False)) # type: ignore
assert settings.hash_key() == settings.hash_key()
@@ -703,17 +757,26 @@ def test_settings_in_truthy_statements_use_value(self):
@pytest.mark.parametrize(
"value,expected",
[
+ (None, []),
("foo", ["foo"]),
("foo,bar", ["foo", "bar"]),
("foo, bar, foobar ", ["foo", "bar", "foobar"]),
+ (["foo", "bar"], ["foo", "bar"]),
+ ],
+ ids=[
+ "none",
+ "string",
+ "comma_separated",
+ "comma_separated_with_spaces",
+ "python_list",
],
)
def test_extra_loggers(self, value, expected):
settings = Settings(logging=LoggingSettings(extra_loggers=value))
- assert PREFECT_LOGGING_EXTRA_LOGGERS.value_from(settings) == expected
+ assert set(PREFECT_LOGGING_EXTRA_LOGGERS.value_from(settings)) == set(expected)
def test_prefect_home_expands_tilde_in_path(self):
- settings = Settings(home="~/test")
+ settings = Settings(home="~/test") # type: ignore
assert PREFECT_HOME.value_from(settings) == Path("~/test").expanduser()
@pytest.mark.parametrize(
@@ -1091,12 +1154,34 @@ def test_resolution_order(self, temporary_env_file, monkeypatch, tmp_path):
assert Settings().client.retry_extra_codes == {420, 500}
+ pyproject_toml_data = {
+ "tool": {"prefect": {"client": {"retry_extra_codes": "200"}}}
+ }
+ with open("pyproject.toml", "w") as f:
+ toml.dump(pyproject_toml_data, f)
+
+ assert Settings().client.retry_extra_codes == {200}
+
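+        # prefect.toml takes precedence over pyproject.toml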
+ prefect_toml_data = {"client": {"retry_extra_codes": "300"}}
+ with open("prefect.toml", "w") as f:
+ toml.dump(prefect_toml_data, f)
+
+ assert Settings().client.retry_extra_codes == {300}
+
temporary_env_file("PREFECT_CLIENT_RETRY_EXTRA_CODES=429,500")
assert Settings().client.retry_extra_codes == {429, 500}
os.unlink(".env")
+ assert Settings().client.retry_extra_codes == {300}
+
+ os.unlink("prefect.toml")
+
+ assert Settings().client.retry_extra_codes == {200}
+
+ os.unlink("pyproject.toml")
+
assert Settings().client.retry_extra_codes == {420, 500}
monkeypatch.setenv("PREFECT_TEST_MODE", "1")
@@ -1156,6 +1241,209 @@ def test_resolution_order_with_nested_settings(
assert Settings().api.url == "http://example.com:4200"
+ def test_profiles_path_from_env_source(
+ self, temporary_env_file, monkeypatch, tmp_path
+ ):
+ profiles_path = tmp_path / "custom_profiles.toml"
+
+ monkeypatch.delenv("PREFECT_TESTING_TEST_MODE", raising=False)
+ monkeypatch.delenv("PREFECT_TESTING_UNIT_TEST_MODE", raising=False)
+
+ profiles_path.write_text(
+ textwrap.dedent(
+ """
+ active = "foo"
+
+ [profiles.foo]
+ PREFECT_CLIENT_RETRY_EXTRA_CODES = "420,500"
+ """
+ )
+ )
+
+ temporary_env_file(f"PREFECT_PROFILES_PATH={profiles_path}")
+
+ assert Settings().profiles_path == profiles_path
+ assert Settings().client.retry_extra_codes == {420, 500}
+
+ os.unlink(".env")
+
+ monkeypatch.setenv("PREFECT_TEST_MODE", "1")
+ monkeypatch.setenv("PREFECT_TESTING_UNIT_TEST_MODE", "1")
+
+ assert Settings().client.retry_extra_codes == set()
+
+ def test_profiles_path_from_toml_source(
+ self, temporary_toml_file, monkeypatch, tmp_path
+ ):
+ profiles_path = tmp_path / "custom_profiles.toml"
+
+ monkeypatch.delenv("PREFECT_TESTING_TEST_MODE", raising=False)
+ monkeypatch.delenv("PREFECT_TESTING_UNIT_TEST_MODE", raising=False)
+
+ profiles_path.write_text(
+ textwrap.dedent(
+ """
+ active = "foo"
+
+ [profiles.foo]
+ PREFECT_CLIENT_RETRY_EXTRA_CODES = "420,500"
+ """
+ )
+ )
+
+ temporary_toml_file({"profiles_path": str(profiles_path)})
+
+ assert Settings().profiles_path == profiles_path
+ assert Settings().client.retry_extra_codes == {420, 500}
+
+ os.unlink("prefect.toml")
+
+ monkeypatch.setenv("PREFECT_TEST_MODE", "1")
+ monkeypatch.setenv("PREFECT_TESTING_UNIT_TEST_MODE", "1")
+
+ assert Settings().client.retry_extra_codes == set()
+
+ def test_profiles_path_from_pyproject_source(
+ self, temporary_toml_file, monkeypatch, tmp_path
+ ):
+ monkeypatch.delenv("PREFECT_TESTING_TEST_MODE", raising=False)
+ monkeypatch.delenv("PREFECT_TESTING_UNIT_TEST_MODE", raising=False)
+
+ profiles_path = tmp_path / "custom_profiles.toml"
+ profiles_path.write_text(
+ textwrap.dedent(
+ """
+ active = "foo"
+
+ [profiles.foo]
+ PREFECT_CLIENT_RETRY_EXTRA_CODES = "420,500"
+ """
+ )
+ )
+
+ temporary_toml_file(
+ {"tool": {"prefect": {"profiles_path": str(profiles_path)}}},
+ path=Path("pyproject.toml"),
+ )
+
+ assert Settings().profiles_path == profiles_path
+ assert Settings().client.retry_extra_codes == {420, 500}
+
+ os.unlink("pyproject.toml")
+
+ monkeypatch.setenv("PREFECT_TEST_MODE", "1")
+ monkeypatch.setenv("PREFECT_TESTING_UNIT_TEST_MODE", "1")
+
+ assert Settings().client.retry_extra_codes == set()
+
+ def test_profiles_path_resolution_order_from_sources(
+ self, temporary_env_file, monkeypatch, tmp_path
+ ):
+ monkeypatch.delenv("PREFECT_TESTING_TEST_MODE", raising=False)
+ monkeypatch.delenv("PREFECT_TESTING_UNIT_TEST_MODE", raising=False)
+
+ pyproject_profiles_path = tmp_path / "pyproject_profiles.toml"
+ pyproject_profiles_path.write_text(
+ textwrap.dedent(
+ """
+ active = "foo"
+
+ [profiles.foo]
+ PREFECT_CLIENT_RETRY_EXTRA_CODES = "420,500"
+ """
+ )
+ )
+
+ toml_profiles_path = tmp_path / "toml_profiles.toml"
+ toml_profiles_path.write_text(
+ textwrap.dedent(
+ """
+ active = "foo"
+
+ [profiles.foo]
+ PREFECT_CLIENT_RETRY_EXTRA_CODES = "300"
+ """
+ )
+ )
+
+ env_profiles_path = tmp_path / "env_profiles.toml"
+ env_profiles_path.write_text(
+ textwrap.dedent(
+ """
+ active = "foo"
+
+ [profiles.foo]
+ PREFECT_CLIENT_RETRY_EXTRA_CODES = "200"
+ """
+ )
+ )
+
+ with open("pyproject.toml", "w") as f:
+ toml.dump(
+ {"tool": {"prefect": {"profiles_path": str(pyproject_profiles_path)}}},
+ f,
+ )
+
+ assert Settings().profiles_path == pyproject_profiles_path
+ assert Settings().client.retry_extra_codes == {420, 500}
+
+ with open("prefect.toml", "w") as f:
+ toml.dump({"profiles_path": str(toml_profiles_path)}, f)
+
+ assert Settings().profiles_path == toml_profiles_path
+ assert Settings().client.retry_extra_codes == {300}
+
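+        # Environment variables take precedence over both TOML sources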
+ temporary_env_file(f"PREFECT_PROFILES_PATH={env_profiles_path}")
+
+ assert Settings().profiles_path == env_profiles_path
+ assert Settings().client.retry_extra_codes == {200}
+
+ os.unlink(".env")
+
+ assert Settings().profiles_path == toml_profiles_path
+ assert Settings().client.retry_extra_codes == {300}
+
+ os.unlink("prefect.toml")
+
+ assert Settings().profiles_path == pyproject_profiles_path
+ assert Settings().client.retry_extra_codes == {420, 500}
+
+ os.unlink("pyproject.toml")
+
+ monkeypatch.setenv("PREFECT_TEST_MODE", "1")
+ monkeypatch.setenv("PREFECT_TESTING_UNIT_TEST_MODE", "1")
+
+ assert Settings().client.retry_extra_codes == set()
+
+ def test_dot_env_filters_as_expected(self, temporary_env_file):
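+        # Unprefixed variables in .env (e.g. HOME, NAME) must not leak into
+        # settings fields whose names happen to match.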
+ expected_home = Settings().home
+ expected_db_name = Settings().server.database.name
+ temporary_env_file("HOME=foo\nNAME=bar")
+ assert Settings().home == expected_home
+ assert Settings().home != "foo"
+ assert Settings().server.database.name == expected_db_name
+ assert Settings().server.database.name != "bar"
+
+ def test_environment_variables_take_precedence_over_toml_settings(
+ self, monkeypatch, temporary_toml_file
+ ):
+ """
+ Test to ensure that fields with multiple validation aliases respect the
+ expected precedence of sources.
+
+ Regression test for https://github.com/PrefectHQ/prefect/issues/15981
+ """
+ for env_var in os.environ:
+ if env_var.startswith("PREFECT_"):
+ monkeypatch.delenv(env_var, raising=False)
+
+ monkeypatch.setenv("PREFECT_SERVER_ALLOW_EPHEMERAL_MODE", "false")
+
+ temporary_toml_file({"server": {"ephemeral": {"enabled": "true"}}})
+
+ assert not Settings().server.ephemeral.enabled
+ assert not PREFECT_SERVER_ALLOW_EPHEMERAL_MODE.value()
+
class TestLoadProfiles:
@pytest.fixture(autouse=True)
@@ -1167,6 +1455,20 @@ def temporary_profiles_path(self, tmp_path):
def test_load_profiles_no_profiles_file(self):
assert load_profiles()
+ def test_env_variables_respected_when_no_profiles_file(self, monkeypatch):
+ """
+ Regression test for https://github.com/PrefectHQ/prefect/issues/15981
+ """
+ for env_var in os.environ:
+ if env_var.startswith("PREFECT_"):
+ monkeypatch.delenv(env_var, raising=False)
+
+ monkeypatch.setenv("PREFECT_SERVER_ALLOW_EPHEMERAL_MODE", "false")
+
+ # Will be true if the profile is incorrectly taking priority
+ assert not Settings().server.ephemeral.enabled
+ assert not PREFECT_SERVER_ALLOW_EPHEMERAL_MODE.value()
+
def test_load_profiles_missing_ephemeral(self, temporary_profiles_path):
temporary_profiles_path.write_text(
textwrap.dedent(
@@ -1323,7 +1625,7 @@ def test_save_profiles_additional_profiles(self, temporary_profiles_path):
class TestProfile:
def test_init_casts_names_to_setting_types(self):
- profile = Profile(name="test", settings={"PREFECT_DEBUG_MODE": 1})
+ profile = Profile(name="test", settings={"PREFECT_DEBUG_MODE": 1}) # type: ignore
assert profile.settings == {PREFECT_DEBUG_MODE: 1}
def test_validate_settings(self):
@@ -1621,19 +1923,20 @@ def check_setting_value(self, setting, value):
if isinstance(settings_value, pydantic.SecretStr):
settings_value = settings_value.get_secret_value()
- if setting == "PREFECT_CLIENT_RETRY_EXTRA_CODES":
- assert settings_value == {int(value)}
- assert getattr(prefect.settings, setting).value() == {int(value)}
- assert current_settings.to_environment_variables(exclude_unset=True)[
- setting
- ] == str([int(value)])
-
- elif setting == "PREFECT_LOGGING_EXTRA_LOGGERS":
- assert settings_value == [value]
- assert getattr(prefect.settings, setting).value() == [value]
- assert current_settings.to_environment_variables(exclude_unset=True)[
- setting
- ] == str([value])
+
+ if setting == "PREFECT_LOGGING_EXTRA_LOGGERS":
+            assert isinstance(settings_value, list)
+            settings_value.sort()
+            value.sort()
+            assert settings_value == value
+            legacy_value = getattr(prefect.settings, setting).value()
+            assert isinstance(legacy_value, list)
+            legacy_value.sort()
+            assert legacy_value == value
+ env_value = current_settings.to_environment_variables(
+ exclude_unset=True
+ )[setting]
+ assert env_value.split(",") == to_jsonable_python(value)
else:
assert settings_value == value
# get value from legacy setting object
@@ -1643,7 +1946,9 @@ def check_setting_value(self, setting, value):
if not SUPPORTED_SETTINGS[setting].get("legacy"):
assert current_settings.to_environment_variables(
exclude_unset=True
- )[setting] == str(to_jsonable_python(value))
+ )[setting] == _to_environment_variable_value(
+ to_jsonable_python(value)
+ )
def test_set_via_env_var(self, setting_and_value, monkeypatch):
setting, value = setting_and_value
@@ -1655,7 +1960,7 @@ def test_set_via_env_var(self, setting_and_value, monkeypatch):
monkeypatch.setenv("PREFECT_TEST_MODE", "True")
# mock set the env var
- monkeypatch.setenv(setting, str(value))
+ monkeypatch.setenv(setting, _to_environment_variable_value(value))
self.check_setting_value(setting, value)
@@ -1696,6 +2001,50 @@ def test_set_via_dot_env_file(
):
monkeypatch.setenv("PREFECT_TEST_MODE", "True")
- temporary_env_file(f"{setting}={value}")
+ temporary_env_file(f"{setting}={_to_environment_variable_value(value)}")
+
+ self.check_setting_value(setting, value)
+
+ def test_set_via_prefect_toml_file(
+ self, setting_and_value, temporary_toml_file, monkeypatch
+ ):
+ setting, value = setting_and_value
+ if setting == "PREFECT_PROFILES_PATH":
+ monkeypatch.delenv("PREFECT_PROFILES_PATH", raising=False)
+ if (
+ setting == "PREFECT_TEST_SETTING"
+ or setting == "PREFECT_TESTING_TEST_SETTING"
+ ):
+ monkeypatch.setenv("PREFECT_TEST_MODE", "True")
+
+ settings_fields = _get_settings_fields(prefect.settings.Settings)
+ toml_dict = {}
+ set_in_dict(
+ toml_dict, settings_fields[setting].accessor, to_jsonable_python(value)
+ )
+ temporary_toml_file(toml_dict)
+
+ self.check_setting_value(setting, value)
+
+ def test_set_via_pyproject_toml_file(
+ self, setting_and_value, temporary_toml_file, monkeypatch
+ ):
+ setting, value = setting_and_value
+ if setting == "PREFECT_PROFILES_PATH":
+ monkeypatch.delenv("PREFECT_PROFILES_PATH", raising=False)
+ if (
+ setting == "PREFECT_TEST_SETTING"
+ or setting == "PREFECT_TESTING_TEST_SETTING"
+ ):
+ monkeypatch.setenv("PREFECT_TEST_MODE", "True")
+
+ settings_fields = _get_settings_fields(prefect.settings.Settings)
+ toml_dict = {}
+ set_in_dict(
+ toml_dict,
+ f"tool.prefect.{settings_fields[setting].accessor}",
+ to_jsonable_python(value),
+ )
+ temporary_toml_file(toml_dict, path=Path("pyproject.toml"))
self.check_setting_value(setting, value)
diff --git a/tests/test_task_engine.py b/tests/test_task_engine.py
index 76f9dbfa3564..c6702f6a11de 100644
--- a/tests/test_task_engine.py
+++ b/tests/test_task_engine.py
@@ -14,7 +14,7 @@
import pytest
from prefect import Task, flow, tags, task
-from prefect.cache_policies import FLOW_PARAMETERS
+from prefect.cache_policies import FLOW_PARAMETERS, INPUTS, TASK_SOURCE
from prefect.client.orchestration import PrefectClient, SyncPrefectClient
from prefect.client.schemas.objects import StateType
from prefect.concurrency.asyncio import concurrency as aconcurrency
@@ -1804,6 +1804,35 @@ async def async_task():
assert first_val is None
assert second_val is None
+ async def test_error_handling_on_cache_policies(self, prefect_client, tmp_path):
+ fs = LocalFileSystem(basepath=tmp_path)
+ await fs.save("error-handling-test")
+
+ @task(
+ cache_policy=TASK_SOURCE + INPUTS,
+ result_storage=fs,
+ )
+ def my_random_task(x: int, cmplx_input):
+ return random.randint(0, x)
+
+ @flow
+ def my_param_flow(x: int):
+ import threading
+
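+            # A Thread is unhashable by the cache policy, so cache key
+            # computation fails and the task runs uncached rather than erroring.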
+ thread = threading.Thread()
+
+ first_val = my_random_task(x, cmplx_input=thread, return_state=True)
+ second_val = my_random_task(x, cmplx_input=thread, return_state=True)
+ return first_val, second_val
+
+ first, second = my_param_flow(4200)
+ assert first.name == "Completed"
+ assert second.name == "Completed"
+
+ first_result = await first.result()
+ second_result = await second.result()
+ assert first_result != second_result
+
async def test_flow_parameter_caching(self, prefect_client, tmp_path):
fs = LocalFileSystem(basepath=tmp_path)
await fs.save("param-test")
diff --git a/tests/test_tasks.py b/tests/test_tasks.py
index 3cdd7569a965..88cc1c4d086a 100644
--- a/tests/test_tasks.py
+++ b/tests/test_tasks.py
@@ -2,6 +2,7 @@
import datetime
import inspect
import json
+import threading
import time
from asyncio import Event, sleep
from functools import partial
@@ -1969,6 +1970,80 @@ def foo(x):
):
foo(1)
+ async def test_unhashable_input_provides_helpful_error(self, caplog):
+ """Test that trying to cache a task with unhashable inputs provides helpful error message"""
+ lock = threading.Lock()
+
+ @task(persist_result=True)
+ def foo(x, lock_obj):
+ return x
+
+ foo(42, lock_obj=lock)
+
+ error_msg = caplog.text
+
+ # First we see the cache policy's message
+ assert (
+ "This often occurs when task inputs contain objects that cannot be cached"
+ in error_msg
+ )
+ assert "like locks, file handles, or other system resources." in error_msg
+ assert "To resolve this, you can:" in error_msg
+ assert (
+ "1. Exclude these arguments by defining a custom `cache_key_fn`"
+ in error_msg
+ )
+ assert "2. Disable caching by passing `cache_policy=NONE`" in error_msg
+
+ # Then we see the original HashError details
+ assert "Unable to create hash - objects could not be serialized." in error_msg
+        assert "JSON error: Unable to serialize unknown type: " in error_msg
+ assert "Pickle error: cannot pickle '_thread.lock' object" in error_msg
+
+ async def test_unhashable_input_workarounds(self):
+ """Test workarounds for handling unhashable inputs"""
+ lock = threading.Lock()
+
+ # Solution 1: Use cache_key_fn to exclude problematic argument
+ def cache_on_x_only(context, parameters):
+ return str(parameters.get("x"))
+
+ @task(cache_key_fn=cache_on_x_only, persist_result=True)
+ def foo_with_key_fn(x, lock_obj):
+ return x
+
+ # Solution 2: Disable caching entirely
+ @task(cache_policy=NONE, persist_result=True)
+ def foo_with_none_policy(x, lock_obj):
+ return x
+
+ @flow
+ def test_flow():
+ # Both approaches should work without errors
+ return (
+ foo_with_key_fn(42, lock_obj=lock, return_state=True),
+ foo_with_key_fn(42, lock_obj=lock, return_state=True),
+ foo_with_none_policy(42, lock_obj=lock, return_state=True),
+ foo_with_none_policy(42, lock_obj=lock, return_state=True),
+ )
+
+ s1, s2, s3, s4 = test_flow()
+
+ # Key fn approach should still cache based on x
+ assert s1.name == "Completed"
+ assert s2.name == "Cached"
+ assert await s1.result() == 42
+ assert await s2.result() == 42
+
+ # NONE policy approach should never cache
+ assert s3.name == "Completed"
+ assert s4.name == "Completed"
+ assert await s3.result() == 42
+ assert await s4.result() == 42
+
class TestCacheFunctionBuiltins:
async def test_task_input_hash_within_flows(
@@ -2038,7 +2113,7 @@ def __init__(self, x):
self.x = x
def __eq__(self, other) -> bool:
- return type(self) == type(other) and self.x == other.x
+ return type(self) is type(other) and self.x == other.x
@task(
cache_key_fn=task_input_hash,
@@ -2071,7 +2146,7 @@ def __init__(self, x):
self.x = x
def __eq__(self, other) -> bool:
- return type(self) == type(other) and self.x == other.x
+ return type(self) is type(other) and self.x == other.x
@task(
cache_key_fn=task_input_hash,
diff --git a/tests/test_types.py b/tests/test_types.py
new file mode 100644
index 000000000000..7f1d962031d0
--- /dev/null
+++ b/tests/test_types.py
@@ -0,0 +1,10 @@
+from pydantic import BaseModel
+
+from prefect.types import KeyValueLabels
+
+
+def test_allow_none_as_empty_dict():
+ class Model(BaseModel):
+ labels: KeyValueLabels
+
+ assert Model(labels=None).labels == {} # type: ignore
diff --git a/tests/utilities/test_hashing.py b/tests/utilities/test_hashing.py
index 6b28ec1dd12c..ac1e1547d386 100644
--- a/tests/utilities/test_hashing.py
+++ b/tests/utilities/test_hashing.py
@@ -1,8 +1,11 @@
import hashlib
+import threading
+from unittest.mock import MagicMock
import pytest
-from prefect.utilities.hashing import file_hash, stable_hash
+from prefect.exceptions import HashError
+from prefect.utilities.hashing import file_hash, hash_objects, stable_hash
@pytest.mark.parametrize(
@@ -55,3 +58,28 @@ def test_file_hash_hashes(self, tmp_path):
assert val == hashlib.md5(b"0").hexdigest()
# Check if the hash is stable
assert val == "cfcd208495d565ef66e7dff9f98764da"
+
+
+class TestHashObjects:
+ def test_hash_objects_handles_unhashable_objects_gracefully(self):
+ """Test that unhashable objects return None by default"""
+ lock = threading.Lock()
+ result = hash_objects({"data": "hello", "lock": lock})
+ assert result is None
+
+ def test_hash_objects_raises_with_helpful_message(self):
+ """Test that unhashable objects raise HashError when raise_on_failure=True"""
+ lock = threading.Lock()
+ mock_file = MagicMock()
+ mock_file.__str__ = lambda _: ""
+
+ with pytest.raises(HashError) as exc:
+ hash_objects(
+ {"data": "hello", "lock": lock, "file": mock_file},
+ raise_on_failure=True,
+ )
+
+ error_msg = str(exc.value)
+ assert "Unable to create hash" in error_msg
+ assert "JSON error" in error_msg
+ assert "Pickle error" in error_msg
diff --git a/tests/utilities/test_urls.py b/tests/utilities/test_urls.py
index 700db678b195..940453b03afe 100644
--- a/tests/utilities/test_urls.py
+++ b/tests/utilities/test_urls.py
@@ -390,3 +390,24 @@ class UnsupportedType:
with temporary_settings({PREFECT_UI_URL: MOCK_PREFECT_UI_URL}):
assert url_for(obj=unsupported_obj) is None # type: ignore
+
+
+def test_url_for_with_additional_format_kwargs():
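+    # Extra keyword arguments fill remaining placeholders in the URL template,
+    # here the work pool name segment of the worker URL.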
+ with temporary_settings({PREFECT_UI_URL: MOCK_PREFECT_UI_URL}):
+ url = url_for(
+ obj="worker",
+ obj_id="123e4567-e89b-12d3-a456-426614174000",
+ work_pool_name="my-work-pool",
+ )
+ assert (
+ url
+ == f"{MOCK_PREFECT_UI_URL}/work-pools/work-pool/my-work-pool/worker/123e4567-e89b-12d3-a456-426614174000"
+ )
+
+
+def test_url_for_with_additional_format_kwargs_raises_if_placeholder_not_replaced():
+ with pytest.raises(
+ ValueError,
+ match="Unable to generate URL for worker because the following keys are missing: work_pool_name",
+ ):
+ url_for(obj="worker", obj_id="123e4567-e89b-12d3-a456-42661417400")
diff --git a/tests/workers/test_base_worker.py b/tests/workers/test_base_worker.py
index dc6493a62f66..dc870a4a07b6 100644
--- a/tests/workers/test_base_worker.py
+++ b/tests/workers/test_base_worker.py
@@ -1,17 +1,22 @@
import uuid
from typing import Any, Dict, Optional, Type
-from unittest.mock import MagicMock
+from unittest import mock
+from unittest.mock import MagicMock, Mock
+import httpx
import pendulum
import pytest
from packaging import version
from pydantic import Field
+from starlette import status
import prefect
import prefect.client.schemas as schemas
from prefect.blocks.core import Block
+from prefect.client.base import ServerType
from prefect.client.orchestration import PrefectClient, get_client
from prefect.client.schemas import FlowRun
+from prefect.client.schemas.objects import StateType, WorkerMetadata
from prefect.exceptions import (
CrashedRun,
ObjectNotFound,
@@ -28,7 +33,13 @@
get_current_settings,
temporary_settings,
)
-from prefect.states import Completed, Pending, Running, Scheduled
+from prefect.states import (
+ Completed,
+ Failed,
+ Pending,
+ Running,
+ Scheduled,
+)
from prefect.testing.utilities import AsyncMock
from prefect.utilities.pydantic import parse_obj_as
from prefect.workers.base import (
@@ -161,6 +172,42 @@ async def test_worker_sends_heartbeat_messages(
assert second_heartbeat > first_heartbeat
+async def test_worker_sends_heartbeat_gets_id(respx_mock):
+ work_pool_name = "test-work-pool"
+ test_worker_id = uuid.UUID("028EC481-5899-49D7-B8C5-37A2726E9840")
+ async with WorkerTestImpl(name="test", work_pool_name=work_pool_name) as worker:
+ setattr(worker, "_should_get_worker_id", lambda: True)
+ # Pass through the non-relevant paths
+ respx_mock.get(f"api/work_pools/{work_pool_name}").pass_through()
+ respx_mock.get("api/csrf-token?").pass_through()
+ respx_mock.post("api/work_pools/").pass_through()
+ respx_mock.patch(f"api/work_pools/{work_pool_name}").pass_through()
+
+ respx_mock.post(
+ f"api/work_pools/{work_pool_name}/workers/heartbeat",
+ ).mock(
+ return_value=httpx.Response(status.HTTP_200_OK, text=str(test_worker_id))
+ )
+
+ await worker.sync_with_backend()
+
+ assert worker.backend_id == test_worker_id
+
+
+async def test_worker_sends_heartbeat_only_gets_id_once():
+ async with WorkerTestImpl(name="test", work_pool_name="test-work-pool") as worker:
+ worker._client.server_type = ServerType.CLOUD
+ mock = AsyncMock(return_value="test")
+ setattr(worker._client, "send_worker_heartbeat", mock)
+ await worker.sync_with_backend()
+ await worker.sync_with_backend()
+
+ second_call = mock.await_args_list[1]
+
+ assert worker.backend_id == "test"
+ assert not second_call.kwargs["get_worker_id"]
+
+
async def test_worker_with_work_pool(
prefect_client: PrefectClient, worker_deployment_wq1, work_pool
):
@@ -250,6 +297,73 @@ def create_run_with_deployment_2(state):
assert {flow_run.id for flow_run in submitted_flow_runs} == set(flow_run_ids[1:3])
+async def test_workers_do_not_submit_flow_runs_awaiting_retry(
+ prefect_client: PrefectClient,
+ work_queue_1,
+ work_pool,
+):
+ """
+ Regression test for https://github.com/PrefectHQ/prefect/issues/15458
+
+ Ensure that flows in `AwaitingRetry` state are not submitted by workers. Previously,
+ with a retry delay long enough, workers would pick up flow runs in `AwaitingRetry`
+ state and submit them, even though the process they were initiated from is responsible
+ for retrying them.
+
+ The flows would be picked up by the worker because `AwaitingRetry` is a `SCHEDULED`
+ state type.
+
+ This test goes through the following steps:
+ - Create a flow
+ - Create a deployment for the flow
+ - Create a flow run for the deployment
+ - Set the flow run to `Running`
+ - Set the flow run to failed
+ - The server will reject this transition and put the flow run in an `AwaitingRetry` state
+ - Have the worker pick up any available flow runs to make sure that the flow run in `AwaitingRetry` state
+ is not picked up by the worker
+ """
+
+ @flow(retries=2)
+ def test_flow():
+ pass
+
+ flow_id = await prefect_client.create_flow(
+ flow=test_flow,
+ )
+ deployment_id = await prefect_client.create_deployment(
+ flow_id=flow_id,
+ name="test-deployment",
+ work_queue_name=work_queue_1.name,
+ work_pool_name=work_pool.name,
+ )
+ flow_run = await prefect_client.create_flow_run_from_deployment(
+ deployment_id, state=Running()
+ )
+ # Need to update empirical policy so the server is aware of the retries
+ flow_run.empirical_policy.retries = 2
+ await prefect_client.update_flow_run(
+ flow_run_id=flow_run.id,
+ flow_version=test_flow.version,
+ empirical_policy=flow_run.empirical_policy,
+ )
+ # Set the flow run to failed
+ response = await prefect_client.set_flow_run_state(flow_run.id, state=Failed())
+ # The transition should be rejected and the flow run should be in `AwaitingRetry` state
+ assert response.state.name == "AwaitingRetry"
+ assert response.state.type == StateType.SCHEDULED
+
+ flow_run = await prefect_client.read_flow_run(flow_run.id)
+    # Ensure the flow run's scheduled time is earlier than now, ruling out the
+    # possibility that the worker skips it because its scheduled time is in the future
+ assert flow_run.state.state_details.scheduled_time < pendulum.now("utc")
+
+ async with WorkerTestImpl(work_pool_name=work_pool.name) as worker:
+ submitted_flow_runs = await worker.get_and_submit_flow_runs()
+
+ assert submitted_flow_runs == []
+
+
async def test_priority_trumps_lateness(
prefect_client: PrefectClient,
worker_deployment_wq1,
@@ -288,6 +402,34 @@ def create_run_with_deployment_2(state):
assert {flow_run.id for flow_run in submitted_flow_runs} == set(flow_run_ids[1:2])
+async def test_worker_releases_limit_slot_when_aborting_a_change_to_pending(
+ prefect_client: PrefectClient, worker_deployment_wq1, work_pool
+):
+ """Regression test for https://github.com/PrefectHQ/prefect/issues/15952"""
+
+ def create_run_with_deployment(state):
+ return prefect_client.create_flow_run_from_deployment(
+ worker_deployment_wq1.id, state=state
+ )
+
+ flow_run = await create_run_with_deployment(
+ Scheduled(scheduled_time=pendulum.now("utc").subtract(days=1))
+ )
+
+ run_mock = AsyncMock()
+ release_mock = Mock()
+
+ async with WorkerTestImpl(work_pool_name=work_pool.name, limit=1) as worker:
+ worker.run = run_mock
+ worker._propose_pending_state = AsyncMock(return_value=False)
+ worker._release_limit_slot = release_mock
+
+ await worker.get_and_submit_flow_runs()
+
+ run_mock.assert_not_called()
+ release_mock.assert_called_once_with(flow_run.id)
+
+
async def test_worker_with_work_pool_and_limit(
prefect_client: PrefectClient, worker_deployment_wq1, work_pool
):
@@ -1050,7 +1192,7 @@ async def test_job_configuration_from_template_overrides_with_remote_variables()
class ArbitraryJobConfiguration(BaseJobConfiguration):
var1: str
- env: Dict[str, str]
+ env: Dict[str, str] = Field(default_factory=dict)
config = await ArbitraryJobConfiguration.from_template_and_values(
base_job_template=template,
@@ -1488,7 +1630,7 @@ def test_prepare_for_flow_run_with_deployment_and_flow(
assert job_config.command == "prefect flow-run execute"
-async def test_get_flow_run_logger(
+async def test_get_flow_run_logger_without_worker_id_set(
prefect_client: PrefectClient, worker_deployment_wq1, work_pool
):
flow_run = await prefect_client.create_flow_run_from_deployment(
@@ -1499,6 +1641,35 @@ async def test_get_flow_run_logger(
name="test", work_pool_name=work_pool.name, create_pool_if_not_found=False
) as worker:
await worker.sync_with_backend()
+ assert worker.backend_id is None
+ logger = worker.get_flow_run_logger(flow_run)
+
+ assert logger.name == "prefect.flow_runs.worker"
+ assert logger.extra == {
+ "flow_run_name": flow_run.name,
+ "flow_run_id": str(flow_run.id),
+ "flow_name": "",
+ "worker_name": "test",
+ "work_pool_name": work_pool.name,
+ "work_pool_id": str(work_pool.id),
+ }
+
+
+async def test_get_flow_run_logger_with_worker_id_set(
+ prefect_client: PrefectClient,
+ worker_deployment_wq1,
+ work_pool,
+):
+ flow_run = await prefect_client.create_flow_run_from_deployment(
+ worker_deployment_wq1.id
+ )
+
+ async with WorkerTestImpl(
+ name="test", work_pool_name=work_pool.name, create_pool_if_not_found=False
+ ) as worker:
+ await worker.sync_with_backend()
+ worker_id = uuid.uuid4()
+ worker.backend_id = worker_id
logger = worker.get_flow_run_logger(flow_run)
assert logger.name == "prefect.flow_runs.worker"
@@ -1509,6 +1680,7 @@ async def test_get_flow_run_logger(
"worker_name": "test",
"work_pool_name": work_pool.name,
"work_pool_id": str(work_pool.id),
+ "worker_id": str(worker_id),
}
@@ -1698,6 +1870,12 @@ def create_run_with_deployment(state):
{"another-var": "boo"},
{"test-var": "foo", "another-var": "boo"},
),
+ (
+ {"A": "1", "B": "2"},
+ {"A": "1", "B": "3"},
+ {},
+ {"A": "1", "B": "3"},
+ ),
(
{"A": "1", "B": "2"},
{"C": "3", "D": "4"},
@@ -1712,14 +1890,15 @@ def create_run_with_deployment(state):
),
(
{"A": "1", "B": "2"},
- {"B": ""}, # will be treated as unset and not apply
+ {"B": ""}, # empty strings are considered values and will still override
{},
- {"A": "1", "B": "2"},
+ {"A": "1", "B": ""},
),
],
ids=[
"flow_run_into_deployment",
- "deployment_into_work_pool",
+ "deployment_into_work_pool_overlap",
+ "deployment_into_work_pool_no_overlap",
"flow_run_into_work_pool",
"try_overwrite_with_empty_str",
],
@@ -1798,3 +1977,164 @@ async def test_env_merge_logic_is_deep(
for key, value in expected_env.items():
assert config.env[key] == value
+
+
+class TestBaseWorkerHeartbeat:
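+    # These tests patch the client's HTTP layer to capture the heartbeat request and
+    # verify that worker metadata (integrations, plus any custom fields) reaches the API.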
+ async def test_worker_heartbeat_sends_integrations(
+ self, work_pool, hosted_api_server
+ ):
+ async with WorkerTestImpl(work_pool_name=work_pool.name) as worker:
+ await worker.start(run_once=True)
+ with mock.patch(
+ "prefect.workers.base.load_prefect_collections"
+ ) as mock_load_prefect_collections, mock.patch(
+ "prefect.client.orchestration.PrefectHttpxAsyncClient.post"
+ ) as mock_send_worker_heartbeat_post, mock.patch(
+ "prefect.workers.base.distributions"
+ ) as mock_distributions:
+ mock_load_prefect_collections.return_value = {
+ "prefect_aws": "1.0.0",
+ }
+ mock_distributions.return_value = [
+ mock.MagicMock(
+ metadata={"Name": "prefect-aws"},
+ version="1.0.0",
+ )
+ ]
+
+ async with get_client() as client:
+ worker._client = client
+ worker._client.server_type = ServerType.CLOUD
+ await worker.sync_with_backend()
+
+ mock_send_worker_heartbeat_post.assert_called_once_with(
+ f"/work_pools/{work_pool.name}/workers/heartbeat",
+ json={
+ "name": worker.name,
+ "heartbeat_interval_seconds": worker.heartbeat_interval_seconds,
+ "metadata": {
+ "integrations": [
+ {"name": "prefect-aws", "version": "1.0.0"}
+ ]
+ },
+ "return_id": True,
+ },
+ )
+
+ assert worker._worker_metadata_sent
+
+ async def test_custom_worker_can_send_arbitrary_metadata(
+ self, work_pool, hosted_api_server
+ ):
+ class CustomWorker(BaseWorker):
+ type: str = "test-custom-metadata"
+ job_configuration: Type[BaseJobConfiguration] = BaseJobConfiguration
+
+ async def run(self):
+ pass
+
+ async def _worker_metadata(self) -> WorkerMetadata:
+ return WorkerMetadata(
+ **{
+ "integrations": [{"name": "prefect-aws", "version": "1.0.0"}],
+ "custom_field": "heya",
+ }
+ )
+
+ async with CustomWorker(work_pool_name=work_pool.name) as worker:
+ await worker.start(run_once=True)
+ with mock.patch(
+ "prefect.workers.base.load_prefect_collections"
+ ) as mock_load_prefect_collections, mock.patch(
+ "prefect.client.orchestration.PrefectHttpxAsyncClient.post"
+ ) as mock_send_worker_heartbeat_post, mock.patch(
+ "prefect.workers.base.distributions"
+ ) as mock_distributions:
+ mock_load_prefect_collections.return_value = {
+ "prefect_aws": "1.0.0",
+ }
+ mock_distributions.return_value = [
+ mock.MagicMock(
+ metadata={"Name": "prefect-aws"},
+ version="1.0.0",
+ )
+ ]
+
+ async with get_client() as client:
+ worker._client = client
+ worker._client.server_type = ServerType.CLOUD
+ await worker.sync_with_backend()
+
+ mock_send_worker_heartbeat_post.assert_called_once_with(
+ f"/work_pools/{work_pool.name}/workers/heartbeat",
+ json={
+ "name": worker.name,
+ "heartbeat_interval_seconds": worker.heartbeat_interval_seconds,
+ "metadata": {
+ "integrations": [
+ {"name": "prefect-aws", "version": "1.0.0"}
+ ],
+ "custom_field": "heya",
+ },
+ "return_id": True,
+ },
+ )
+
+ assert worker._worker_metadata_sent
+
+
+async def test_worker_gives_labels_to_flow_runs_when_using_cloud_api(
+ prefect_client: PrefectClient, worker_deployment_wq1, work_pool
+):
+ CloudClientMock = AsyncMock()
+
+ def create_run_with_deployment(state):
+ return prefect_client.create_flow_run_from_deployment(
+ worker_deployment_wq1.id, state=state
+ )
+
+ flow_run = await create_run_with_deployment(
+ Scheduled(scheduled_time=pendulum.now("utc").subtract(days=1))
+ )
+
+ async with WorkerTestImpl(work_pool_name=work_pool.name) as worker:
+ assert worker._client is not None
+ worker._client.server_type = ServerType.CLOUD
+ worker._cloud_client = CloudClientMock
+
+ worker._work_pool = work_pool
+ worker.run = AsyncMock()
+
+ await worker.get_and_submit_flow_runs()
+
+ CloudClientMock.update_flow_run_labels.assert_awaited_once_with(
+ flow_run.id,
+ {"prefect.worker.name": worker.name, "prefect.worker.type": worker.type},
+ )
+
+
+async def test_worker_does_not_give_labels_to_flow_runs_when_not_using_cloud_api(
+ prefect_client: PrefectClient, worker_deployment_wq1, work_pool
+):
+ update_labels_mock = AsyncMock()
+
+ def create_run_with_deployment(state):
+ return prefect_client.create_flow_run_from_deployment(
+ worker_deployment_wq1.id, state=state
+ )
+
+ await create_run_with_deployment(
+ Scheduled(scheduled_time=pendulum.now("utc").subtract(days=1))
+ )
+
+ async with WorkerTestImpl(work_pool_name=work_pool.name) as worker:
+ assert worker._client is not None
+ worker._client.server_type = ServerType.SERVER # Not cloud
+ worker._client.update_flow_run_labels = update_labels_mock
+
+ worker._work_pool = work_pool
+ worker.run = AsyncMock()
+
+ await worker.get_and_submit_flow_runs()
+
+ update_labels_mock.assert_not_awaited()
diff --git a/ui-v2/.gitignore b/ui-v2/.gitignore
new file mode 100644
index 000000000000..01da00be70af
--- /dev/null
+++ b/ui-v2/.gitignore
@@ -0,0 +1,29 @@
+# Logs
+logs
+*.log
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+pnpm-debug.log*
+lerna-debug.log*
+
+node_modules
+dist
+dist-ssr
+*.local
+
+# Editor directories and files
+.vscode/*
+!.vscode/extensions.json
+.idea
+.DS_Store
+*.suo
+*.ntvs*
+*.njsproj
+*.sln
+*.sw?
+
+# Generated files
+oss_schema.json
+*.tsbuildinfo
+*storybook.log
diff --git a/ui-v2/.husky/pre-commit b/ui-v2/.husky/pre-commit
new file mode 100644
index 000000000000..9cb33f011be4
--- /dev/null
+++ b/ui-v2/.husky/pre-commit
@@ -0,0 +1,3 @@
+cd ui-v2
+npm run lint
+npm run format
diff --git a/ui-v2/.storybook/main.ts b/ui-v2/.storybook/main.ts
new file mode 100644
index 000000000000..a77d8032e856
--- /dev/null
+++ b/ui-v2/.storybook/main.ts
@@ -0,0 +1,10 @@
+import type { StorybookConfig } from "@storybook/react-vite";
+
+export default {
+ stories: ["../src/**/*.mdx", "../src/**/*.stories.@(js|jsx|mjs|ts|tsx)"],
+ addons: ["@storybook/addon-essentials", "@storybook/addon-interactions"],
+ framework: {
+ name: "@storybook/react-vite",
+ options: {},
+ },
+} satisfies StorybookConfig;
diff --git a/ui-v2/.storybook/preview.ts b/ui-v2/.storybook/preview.ts
new file mode 100644
index 000000000000..c18b2ea665d7
--- /dev/null
+++ b/ui-v2/.storybook/preview.ts
@@ -0,0 +1,14 @@
+import type { Preview } from "@storybook/react";
+
+import "../src/index.css";
+
+export default {
+ parameters: {
+ controls: {
+ matchers: {
+ color: /(background|color)$/i,
+ date: /Date$/i,
+ },
+ },
+ },
+} satisfies Preview;
diff --git a/ui-v2/README.md b/ui-v2/README.md
new file mode 100644
index 000000000000..917233551f94
--- /dev/null
+++ b/ui-v2/README.md
@@ -0,0 +1,73 @@
+# Prefect UI
+
+## Project setup
+
+```
+npm ci
+```
+
+### Compiles and hot-reloads for development
+
+```
+npm run dev
+```
+
+### Compiles and minifies for production
+
+```
+npm run build
+```
+
+### Lints and fixes files
+
+```
+npm run lint
+```
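+
+### Formats files
+
+The pre-commit hook in `.husky/pre-commit` also runs the formatter; assuming the same `format` script it invokes, you can run it directly:
+
+```
+npm run format
+```
+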
+This template provides a minimal setup to get React working in Vite with HMR and some ESLint rules.
+
+Currently, two official plugins are available:
+
+- [@vitejs/plugin-react](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react/README.md) uses [Babel](https://babeljs.io/) for Fast Refresh
+- [@vitejs/plugin-react-swc](https://github.com/vitejs/vite-plugin-react-swc) uses [SWC](https://swc.rs/) for Fast Refresh
+
+## Expanding the ESLint configuration
+
+If you are developing a production application, we recommend updating the configuration to enable type-aware lint rules:
+
+- Configure the top-level `parserOptions` property like this:
+
+```js
+export default tseslint.config({
+ languageOptions: {
+ // other options...
+ parserOptions: {
+ project: ['./tsconfig.node.json', './tsconfig.app.json'],
+ tsconfigRootDir: import.meta.dirname,
+ },
+ },
+})
+```
+
+- Replace `tseslint.configs.recommended` with `tseslint.configs.recommendedTypeChecked` or `tseslint.configs.strictTypeChecked` (see the sketch at the end of this README)
+- Optionally add `...tseslint.configs.stylisticTypeChecked`
+- Install [eslint-plugin-react](https://github.com/jsx-eslint/eslint-plugin-react) and update the config:
+
+```js
+// eslint.config.js
+import react from 'eslint-plugin-react'
+
+export default tseslint.config({
+ // Set the react version
+ settings: { react: { version: '18.3' } },
+ plugins: {
+ // Add the react plugin
+ react,
+ },
+ rules: {
+ // other rules...
+ // Enable its recommended rules
+ ...react.configs.recommended.rules,
+ ...react.configs['jsx-runtime'].rules,
+ },
+})
+```
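+
+For the type-checked swap in the first bullet above, the change is one line in `extends` (a sketch, assuming the template's default layout):
+
+```js
+export default tseslint.config({
+  extends: [
+    js.configs.recommended,
+    // type-checked preset in place of tseslint.configs.recommended
+    ...tseslint.configs.recommendedTypeChecked,
+  ],
+  // ...plus the `parserOptions` configuration shown earlier
+})
+```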
diff --git a/ui-v2/biome.json b/ui-v2/biome.json
new file mode 100644
index 000000000000..4af4c5e34f5c
--- /dev/null
+++ b/ui-v2/biome.json
@@ -0,0 +1,30 @@
+{
+ "$schema": "https://biomejs.dev/schemas/1.9.4/schema.json",
+ "vcs": {
+ "enabled": false,
+ "clientKind": "git",
+ "useIgnoreFile": false
+ },
+ "files": {
+ "ignoreUnknown": false,
+ "ignore": []
+ },
+ "formatter": {
+ "enabled": true,
+ "indentStyle": "tab"
+ },
+ "organizeImports": {
+ "enabled": true
+ },
+ "linter": {
+ "enabled": false,
+ "rules": {
+ "recommended": true
+ }
+ },
+ "javascript": {
+ "formatter": {
+ "quoteStyle": "double"
+ }
+ }
+}
diff --git a/ui-v2/components.json b/ui-v2/components.json
new file mode 100644
index 000000000000..eaa263eb3e76
--- /dev/null
+++ b/ui-v2/components.json
@@ -0,0 +1,20 @@
+{
+ "$schema": "https://ui.shadcn.com/schema.json",
+ "style": "new-york",
+ "rsc": false,
+ "tsx": true,
+ "tailwind": {
+ "config": "tailwind.config.js",
+ "css": "src/index.css",
+ "baseColor": "neutral",
+ "cssVariables": true,
+ "prefix": ""
+ },
+ "aliases": {
+ "components": "@/components",
+ "utils": "@/lib/utils",
+ "ui": "@/components/ui",
+ "lib": "@/lib",
+ "hooks": "@/hooks"
+ }
+}
diff --git a/ui-v2/eslint.config.js b/ui-v2/eslint.config.js
new file mode 100644
index 000000000000..1be368f4b886
--- /dev/null
+++ b/ui-v2/eslint.config.js
@@ -0,0 +1,53 @@
+import js from "@eslint/js";
+import pluginRouter from "@tanstack/eslint-plugin-router";
+import react from "eslint-plugin-react";
+import reactHooks from "eslint-plugin-react-hooks";
+import reactRefresh from "eslint-plugin-react-refresh";
+import globals from "globals";
+import tseslint from "typescript-eslint";
+import testingLibrary from "eslint-plugin-testing-library";
+import jestDom from "eslint-plugin-jest-dom";
+
+export default tseslint.config(
+ { ignores: ["dist", "src/api/prefect.ts"] },
+ {
+ extends: [
+ js.configs.recommended,
+ ...tseslint.configs.recommendedTypeChecked,
+ ],
+ files: ["**/*.{ts,tsx}"],
+ languageOptions: {
+ ecmaVersion: 2020,
+ globals: globals.browser,
+ parserOptions: {
+ project: ["./tsconfig.node.json", "./tsconfig.app.json"],
+ tsconfigRootDir: import.meta.dirname,
+ },
+ },
+ settings: {
+ react: {
+ version: "18.3",
+ },
+ },
+ plugins: {
+ react,
+ "react-hooks": reactHooks,
+ "react-refresh": reactRefresh,
+ },
+ rules: {
+ ...reactHooks.configs.recommended.rules,
+ "react-refresh/only-export-components": [
+ "warn",
+ { allowConstantExport: true },
+ ],
+ ...react.configs.recommended.rules,
+ ...react.configs["jsx-runtime"].rules,
+ },
+ },
+ ...pluginRouter.configs["flat/recommended"],
+ {
+ files: ["tests/**/*.{ts,tsx}"],
+ ...testingLibrary.configs["flat/react"],
+ ...jestDom.configs["flat/recommended"],
+ },
+);
diff --git a/ui-v2/index.html b/ui-v2/index.html
new file mode 100644
index 000000000000..e382063add67
--- /dev/null
+++ b/ui-v2/index.html
@@ -0,0 +1,12 @@
+<!doctype html>
+<html lang="en">
+  <head>
+    <meta charset="UTF-8" />
+    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+    <title>Prefect Server</title>
+  </head>
+  <body>
+    <div id="root"></div>
+    <script type="module" src="/src/main.tsx"></script>
+  </body>
+</html>
diff --git a/ui-v2/package-lock.json b/ui-v2/package-lock.json
new file mode 100644
index 000000000000..8493d8c1bb38
--- /dev/null
+++ b/ui-v2/package-lock.json
@@ -0,0 +1,11491 @@
+{
+ "name": "prefect-ui",
+ "version": "0.0.0",
+ "lockfileVersion": 3,
+ "requires": true,
+ "packages": {
+ "": {
+ "name": "prefect-ui",
+ "version": "0.0.0",
+ "dependencies": {
+ "@codemirror/lang-json": "^6.0.1",
+ "@hookform/resolvers": "^3.9.1",
+ "@radix-ui/react-avatar": "^1.1.1",
+ "@radix-ui/react-checkbox": "^1.1.2",
+ "@radix-ui/react-dialog": "^1.1.2",
+ "@radix-ui/react-dropdown-menu": "^2.1.2",
+ "@radix-ui/react-hover-card": "^1.1.2",
+ "@radix-ui/react-icons": "^1.3.0",
+ "@radix-ui/react-label": "^2.1.0",
+ "@radix-ui/react-popover": "^1.1.2",
+ "@radix-ui/react-scroll-area": "^1.2.0",
+ "@radix-ui/react-select": "^2.1.2",
+ "@radix-ui/react-separator": "^1.1.0",
+ "@radix-ui/react-slot": "^1.1.0",
+ "@radix-ui/react-tabs": "^1.1.1",
+ "@radix-ui/react-toast": "^1.2.2",
+ "@radix-ui/react-tooltip": "^1.1.3",
+ "@tanstack/react-query": "^5.56.2",
+ "@tanstack/react-table": "^8.20.5",
+ "@tanstack/router-zod-adapter": "^1.58.16",
+ "@uiw/react-codemirror": "^4.23.6",
+ "class-variance-authority": "^0.7.0",
+ "clsx": "^2.1.1",
+ "date-fns": "^3.6.0",
+ "date-fns-tz": "^3.2.0",
+ "lucide-react": "^0.447.0",
+ "openapi-fetch": "^0.12.2",
+ "react": "^18.3.1",
+ "react-day-picker": "^8.10.1",
+ "react-dom": "^18.3.1",
+ "react-hook-form": "^7.53.1",
+ "recharts": "^2.12.7",
+ "tailwind-merge": "^2.5.2",
+ "tailwindcss-animate": "^1.0.7",
+ "zod": "^3.23.8"
+ },
+ "devDependencies": {
+ "@biomejs/biome": "1.9.4",
+ "@eslint/js": "^9.12.0",
+ "@storybook/addon-essentials": "^8.4.2",
+ "@storybook/addon-interactions": "^8.4.2",
+ "@storybook/blocks": "^8.4.2",
+ "@storybook/react": "^8.4.2",
+ "@storybook/react-vite": "^8.4.2",
+ "@storybook/test": "^8.4.2",
+ "@tanstack/eslint-plugin-router": "^1.77.7",
+ "@tanstack/router-devtools": "^1.58.15",
+ "@tanstack/router-plugin": "^1.58.12",
+ "@testing-library/dom": "^10.4.0",
+ "@testing-library/jest-dom": "^6.6.3",
+ "@testing-library/react": "^16.0.1",
+ "@testing-library/user-event": "^14.5.2",
+ "@types/node": "^22.7.4",
+ "@types/react": "^18.3.12",
+ "@types/react-dom": "^18.3.1",
+ "@vitejs/plugin-react-swc": "^3.5.0",
+ "autoprefixer": "^10.4.20",
+ "eslint": "^9.12.0",
+ "eslint-plugin-jest-dom": "^5.4.0",
+ "eslint-plugin-react": "^7.37.1",
+ "eslint-plugin-react-hooks": "^5.1.0-rc.0",
+ "eslint-plugin-react-refresh": "^0.4.9",
+ "eslint-plugin-storybook": "^0.11.0",
+ "eslint-plugin-testing-library": "^6.4.0",
+ "eslint-plugin-unused-imports": "^4.1.4",
+ "globals": "^15.10.0",
+ "husky": "^9.1.6",
+ "jsdom": "^25.0.1",
+ "msw": "^2.6.0",
+ "postcss": "^8.4.47",
+ "storybook": "^8.4.2",
+ "tailwindcss": "^3.4.13",
+ "typescript": "^5.5.3",
+ "typescript-eslint": "^8.8.1",
+ "vite": "^5.4.1",
+ "vitest": "^2.1.4"
+ }
+ },
+ "node_modules/@adobe/css-tools": {
+ "version": "4.4.0",
+ "resolved": "https://registry.npmjs.org/@adobe/css-tools/-/css-tools-4.4.0.tgz",
+ "integrity": "sha512-Ff9+ksdQQB3rMncgqDK78uLznstjyfIf2Arnh22pW8kBpLs6rpKDwgnZT46hin5Hl1WzazzK64DOrhSwYpS7bQ==",
+ "dev": true
+ },
+ "node_modules/@alloc/quick-lru": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/@alloc/quick-lru/-/quick-lru-5.2.0.tgz",
+ "integrity": "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==",
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/@ampproject/remapping": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.3.0.tgz",
+ "integrity": "sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==",
+ "dev": true,
+ "dependencies": {
+ "@jridgewell/gen-mapping": "^0.3.5",
+ "@jridgewell/trace-mapping": "^0.3.24"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@babel/code-frame": {
+ "version": "7.26.2",
+ "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.26.2.tgz",
+ "integrity": "sha512-RJlIHRueQgwWitWgF8OdFYGZX328Ax5BCemNGlqHfplnRT9ESi8JkFlvaVYbS+UubVY6dpv87Fs2u5M29iNFVQ==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-validator-identifier": "^7.25.9",
+ "js-tokens": "^4.0.0",
+ "picocolors": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/compat-data": {
+ "version": "7.26.2",
+ "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.26.2.tgz",
+ "integrity": "sha512-Z0WgzSEa+aUcdiJuCIqgujCshpMWgUpgOxXotrYPSA53hA3qopNaqcJpyr0hVb1FeWdnqFA35/fUtXgBK8srQg==",
+ "dev": true,
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/core": {
+ "version": "7.26.0",
+ "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.26.0.tgz",
+ "integrity": "sha512-i1SLeK+DzNnQ3LL/CswPCa/E5u4lh1k6IAEphON8F+cXt0t9euTshDru0q7/IqMa1PMPz5RnHuHscF8/ZJsStg==",
+ "dev": true,
+ "dependencies": {
+ "@ampproject/remapping": "^2.2.0",
+ "@babel/code-frame": "^7.26.0",
+ "@babel/generator": "^7.26.0",
+ "@babel/helper-compilation-targets": "^7.25.9",
+ "@babel/helper-module-transforms": "^7.26.0",
+ "@babel/helpers": "^7.26.0",
+ "@babel/parser": "^7.26.0",
+ "@babel/template": "^7.25.9",
+ "@babel/traverse": "^7.25.9",
+ "@babel/types": "^7.26.0",
+ "convert-source-map": "^2.0.0",
+ "debug": "^4.1.0",
+ "gensync": "^1.0.0-beta.2",
+ "json5": "^2.2.3",
+ "semver": "^6.3.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/babel"
+ }
+ },
+ "node_modules/@babel/generator": {
+ "version": "7.26.2",
+ "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.26.2.tgz",
+ "integrity": "sha512-zevQbhbau95nkoxSq3f/DC/SC+EEOUZd3DYqfSkMhY2/wfSeaHV1Ew4vk8e+x8lja31IbyuUa2uQ3JONqKbysw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/parser": "^7.26.2",
+ "@babel/types": "^7.26.0",
+ "@jridgewell/gen-mapping": "^0.3.5",
+ "@jridgewell/trace-mapping": "^0.3.25",
+ "jsesc": "^3.0.2"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-compilation-targets": {
+ "version": "7.25.9",
+ "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.25.9.tgz",
+ "integrity": "sha512-j9Db8Suy6yV/VHa4qzrj9yZfZxhLWQdVnRlXxmKLYlhWUVB1sB2G5sxuWYXk/whHD9iW76PmNzxZ4UCnTQTVEQ==",
+ "dev": true,
+ "dependencies": {
+ "@babel/compat-data": "^7.25.9",
+ "@babel/helper-validator-option": "^7.25.9",
+ "browserslist": "^4.24.0",
+ "lru-cache": "^5.1.1",
+ "semver": "^6.3.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-module-imports": {
+ "version": "7.25.9",
+ "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.25.9.tgz",
+ "integrity": "sha512-tnUA4RsrmflIM6W6RFTLFSXITtl0wKjgpnLgXyowocVPrbYrLUXSBXDgTs8BlbmIzIdlBySRQjINYs2BAkiLtw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/traverse": "^7.25.9",
+ "@babel/types": "^7.25.9"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-module-transforms": {
+ "version": "7.26.0",
+ "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.26.0.tgz",
+ "integrity": "sha512-xO+xu6B5K2czEnQye6BHA7DolFFmS3LB7stHZFaOLb1pAwO1HWLS8fXA+eh0A2yIvltPVmx3eNNDBJA2SLHXFw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-module-imports": "^7.25.9",
+ "@babel/helper-validator-identifier": "^7.25.9",
+ "@babel/traverse": "^7.25.9"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0"
+ }
+ },
+ "node_modules/@babel/helper-plugin-utils": {
+ "version": "7.25.9",
+ "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.25.9.tgz",
+ "integrity": "sha512-kSMlyUVdWe25rEsRGviIgOWnoT/nfABVWlqt9N19/dIPWViAOW2s9wznP5tURbs/IDuNk4gPy3YdYRgH3uxhBw==",
+ "dev": true,
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-string-parser": {
+ "version": "7.25.9",
+ "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.25.9.tgz",
+ "integrity": "sha512-4A/SCr/2KLd5jrtOMFzaKjVtAei3+2r/NChoBNoZ3EyP/+GlhoaEGoWOZUmFmoITP7zOJyHIMm+DYRd8o3PvHA==",
+ "dev": true,
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-validator-identifier": {
+ "version": "7.25.9",
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.25.9.tgz",
+ "integrity": "sha512-Ed61U6XJc3CVRfkERJWDz4dJwKe7iLmmJsbOGu9wSloNSFttHV0I8g6UAgb7qnK5ly5bGLPd4oXZlxCdANBOWQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-validator-option": {
+ "version": "7.25.9",
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.25.9.tgz",
+ "integrity": "sha512-e/zv1co8pp55dNdEcCynfj9X7nyUKUXoUEwfXqaZt0omVOmDe9oOTdKStH4GmAw6zxMFs50ZayuMfHDKlO7Tfw==",
+ "dev": true,
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helpers": {
+ "version": "7.26.0",
+ "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.26.0.tgz",
+ "integrity": "sha512-tbhNuIxNcVb21pInl3ZSjksLCvgdZy9KwJ8brv993QtIVKJBBkYXz4q4ZbAv31GdnC+R90np23L5FbEBlthAEw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/template": "^7.25.9",
+ "@babel/types": "^7.26.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/parser": {
+ "version": "7.26.2",
+ "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.26.2.tgz",
+ "integrity": "sha512-DWMCZH9WA4Maitz2q21SRKHo9QXZxkDsbNZoVD62gusNtNBBqDg9i7uOhASfTfIGNzW+O+r7+jAlM8dwphcJKQ==",
+ "dev": true,
+ "dependencies": {
+ "@babel/types": "^7.26.0"
+ },
+ "bin": {
+ "parser": "bin/babel-parser.js"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-jsx": {
+ "version": "7.25.9",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.25.9.tgz",
+ "integrity": "sha512-ld6oezHQMZsZfp6pWtbjaNDF2tiiCYYDqQszHt5VV437lewP9aSi2Of99CK0D0XB21k7FLgnLcmQKyKzynfeAA==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.25.9"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-typescript": {
+ "version": "7.25.9",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.25.9.tgz",
+ "integrity": "sha512-hjMgRy5hb8uJJjUcdWunWVcoi9bGpJp8p5Ol1229PoN6aytsLwNMgmdftO23wnCLMfVmTwZDWMPNq/D1SY60JQ==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.25.9"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/runtime": {
+ "version": "7.26.0",
+ "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.26.0.tgz",
+ "integrity": "sha512-FDSOghenHTiToteC/QRlv2q3DhPZ/oOXTBoirfWNx1Cx3TMVcGWQtMMmQcSvb/JjpNeGzx8Pq/b4fKEJuWm1sw==",
+ "dependencies": {
+ "regenerator-runtime": "^0.14.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/template": {
+ "version": "7.25.9",
+ "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.25.9.tgz",
+ "integrity": "sha512-9DGttpmPvIxBb/2uwpVo3dqJ+O6RooAFOS+lB+xDqoE2PVCE8nfoHMdZLpfCQRLwvohzXISPZcgxt80xLfsuwg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/code-frame": "^7.25.9",
+ "@babel/parser": "^7.25.9",
+ "@babel/types": "^7.25.9"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/traverse": {
+ "version": "7.25.9",
+ "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.25.9.tgz",
+ "integrity": "sha512-ZCuvfwOwlz/bawvAuvcj8rrithP2/N55Tzz342AkTvq4qaWbGfmCk/tKhNaV2cthijKrPAA8SRJV5WWe7IBMJw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/code-frame": "^7.25.9",
+ "@babel/generator": "^7.25.9",
+ "@babel/parser": "^7.25.9",
+ "@babel/template": "^7.25.9",
+ "@babel/types": "^7.25.9",
+ "debug": "^4.3.1",
+ "globals": "^11.1.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/traverse/node_modules/globals": {
+ "version": "11.12.0",
+ "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz",
+ "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==",
+ "dev": true,
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/@babel/types": {
+ "version": "7.26.0",
+ "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.26.0.tgz",
+ "integrity": "sha512-Z/yiTPj+lDVnF7lWeKCIJzaIkI0vYO87dMpZ4bg4TDrFe4XXLFWL1TbXU27gBP3QccxV9mZICCrnjnYlJjXHOA==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-string-parser": "^7.25.9",
+ "@babel/helper-validator-identifier": "^7.25.9"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@biomejs/biome": {
+ "version": "1.9.4",
+ "resolved": "https://registry.npmjs.org/@biomejs/biome/-/biome-1.9.4.tgz",
+ "integrity": "sha512-1rkd7G70+o9KkTn5KLmDYXihGoTaIGO9PIIN2ZB7UJxFrWw04CZHPYiMRjYsaDvVV7hP1dYNRLxSANLaBFGpog==",
+ "dev": true,
+ "hasInstallScript": true,
+ "bin": {
+ "biome": "bin/biome"
+ },
+ "engines": {
+ "node": ">=14.21.3"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/biome"
+ },
+ "optionalDependencies": {
+ "@biomejs/cli-darwin-arm64": "1.9.4",
+ "@biomejs/cli-darwin-x64": "1.9.4",
+ "@biomejs/cli-linux-arm64": "1.9.4",
+ "@biomejs/cli-linux-arm64-musl": "1.9.4",
+ "@biomejs/cli-linux-x64": "1.9.4",
+ "@biomejs/cli-linux-x64-musl": "1.9.4",
+ "@biomejs/cli-win32-arm64": "1.9.4",
+ "@biomejs/cli-win32-x64": "1.9.4"
+ }
+ },
+ "node_modules/@biomejs/cli-darwin-arm64": {
+ "version": "1.9.4",
+ "resolved": "https://registry.npmjs.org/@biomejs/cli-darwin-arm64/-/cli-darwin-arm64-1.9.4.tgz",
+ "integrity": "sha512-bFBsPWrNvkdKrNCYeAp+xo2HecOGPAy9WyNyB/jKnnedgzl4W4Hb9ZMzYNbf8dMCGmUdSavlYHiR01QaYR58cw==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">=14.21.3"
+ }
+ },
+ "node_modules/@biomejs/cli-darwin-x64": {
+ "version": "1.9.4",
+ "resolved": "https://registry.npmjs.org/@biomejs/cli-darwin-x64/-/cli-darwin-x64-1.9.4.tgz",
+ "integrity": "sha512-ngYBh/+bEedqkSevPVhLP4QfVPCpb+4BBe2p7Xs32dBgs7rh9nY2AIYUL6BgLw1JVXV8GlpKmb/hNiuIxfPfZg==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">=14.21.3"
+ }
+ },
+ "node_modules/@biomejs/cli-linux-arm64": {
+ "version": "1.9.4",
+ "resolved": "https://registry.npmjs.org/@biomejs/cli-linux-arm64/-/cli-linux-arm64-1.9.4.tgz",
+ "integrity": "sha512-fJIW0+LYujdjUgJJuwesP4EjIBl/N/TcOX3IvIHJQNsAqvV2CHIogsmA94BPG6jZATS4Hi+xv4SkBBQSt1N4/g==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=14.21.3"
+ }
+ },
+ "node_modules/@biomejs/cli-linux-arm64-musl": {
+ "version": "1.9.4",
+ "resolved": "https://registry.npmjs.org/@biomejs/cli-linux-arm64-musl/-/cli-linux-arm64-musl-1.9.4.tgz",
+ "integrity": "sha512-v665Ct9WCRjGa8+kTr0CzApU0+XXtRgwmzIf1SeKSGAv+2scAlW6JR5PMFo6FzqqZ64Po79cKODKf3/AAmECqA==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=14.21.3"
+ }
+ },
+ "node_modules/@biomejs/cli-linux-x64": {
+ "version": "1.9.4",
+ "resolved": "https://registry.npmjs.org/@biomejs/cli-linux-x64/-/cli-linux-x64-1.9.4.tgz",
+ "integrity": "sha512-lRCJv/Vi3Vlwmbd6K+oQ0KhLHMAysN8lXoCI7XeHlxaajk06u7G+UsFSO01NAs5iYuWKmVZjmiOzJ0OJmGsMwg==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=14.21.3"
+ }
+ },
+ "node_modules/@biomejs/cli-linux-x64-musl": {
+ "version": "1.9.4",
+ "resolved": "https://registry.npmjs.org/@biomejs/cli-linux-x64-musl/-/cli-linux-x64-musl-1.9.4.tgz",
+ "integrity": "sha512-gEhi/jSBhZ2m6wjV530Yy8+fNqG8PAinM3oV7CyO+6c3CEh16Eizm21uHVsyVBEB6RIM8JHIl6AGYCv6Q6Q9Tg==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=14.21.3"
+ }
+ },
+ "node_modules/@biomejs/cli-win32-arm64": {
+ "version": "1.9.4",
+ "resolved": "https://registry.npmjs.org/@biomejs/cli-win32-arm64/-/cli-win32-arm64-1.9.4.tgz",
+ "integrity": "sha512-tlbhLk+WXZmgwoIKwHIHEBZUwxml7bRJgk0X2sPyNR3S93cdRq6XulAZRQJ17FYGGzWne0fgrXBKpl7l4M87Hg==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=14.21.3"
+ }
+ },
+ "node_modules/@biomejs/cli-win32-x64": {
+ "version": "1.9.4",
+ "resolved": "https://registry.npmjs.org/@biomejs/cli-win32-x64/-/cli-win32-x64-1.9.4.tgz",
+ "integrity": "sha512-8Y5wMhVIPaWe6jw2H+KlEm4wP/f7EW3810ZLmDlrEEy5KvBsb9ECEfu/kMWD484ijfQ8+nIi0giMgu9g1UAuuA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=14.21.3"
+ }
+ },
+ "node_modules/@bundled-es-modules/cookie": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/@bundled-es-modules/cookie/-/cookie-2.0.1.tgz",
+ "integrity": "sha512-8o+5fRPLNbjbdGRRmJj3h6Hh1AQJf2dk3qQ/5ZFb+PXkRNiSoMGGUKlsgLfrxneb72axVJyIYji64E2+nNfYyw==",
+ "dev": true,
+ "dependencies": {
+ "cookie": "^0.7.2"
+ }
+ },
+ "node_modules/@bundled-es-modules/statuses": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/@bundled-es-modules/statuses/-/statuses-1.0.1.tgz",
+ "integrity": "sha512-yn7BklA5acgcBr+7w064fGV+SGIFySjCKpqjcWgBAIfrAkY+4GQTJJHQMeT3V/sgz23VTEVV8TtOmkvJAhFVfg==",
+ "dev": true,
+ "dependencies": {
+ "statuses": "^2.0.1"
+ }
+ },
+ "node_modules/@bundled-es-modules/tough-cookie": {
+ "version": "0.1.6",
+ "resolved": "https://registry.npmjs.org/@bundled-es-modules/tough-cookie/-/tough-cookie-0.1.6.tgz",
+ "integrity": "sha512-dvMHbL464C0zI+Yqxbz6kZ5TOEp7GLW+pry/RWndAR8MJQAXZ2rPmIs8tziTZjeIyhSNZgZbCePtfSbdWqStJw==",
+ "dev": true,
+ "dependencies": {
+ "@types/tough-cookie": "^4.0.5",
+ "tough-cookie": "^4.1.4"
+ }
+ },
+ "node_modules/@bundled-es-modules/tough-cookie/node_modules/tough-cookie": {
+ "version": "4.1.4",
+ "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-4.1.4.tgz",
+ "integrity": "sha512-Loo5UUvLD9ScZ6jh8beX1T6sO1w2/MpCRpEP7V280GKMVUQ0Jzar2U3UJPsrdbziLEMMhu3Ujnq//rhiFuIeag==",
+ "dev": true,
+ "dependencies": {
+ "psl": "^1.1.33",
+ "punycode": "^2.1.1",
+ "universalify": "^0.2.0",
+ "url-parse": "^1.5.3"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/@codemirror/autocomplete": {
+ "version": "6.18.2",
+ "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.18.2.tgz",
+ "integrity": "sha512-wJGylKtMFR/Ds6Gh01+OovXE/pncPiKZNNBKuC39pKnH+XK5d9+WsNqcrdxPjFPFTigRBqse0rfxw9UxrfyhPg==",
+ "dependencies": {
+ "@codemirror/language": "^6.0.0",
+ "@codemirror/state": "^6.0.0",
+ "@codemirror/view": "^6.17.0",
+ "@lezer/common": "^1.0.0"
+ },
+ "peerDependencies": {
+ "@codemirror/language": "^6.0.0",
+ "@codemirror/state": "^6.0.0",
+ "@codemirror/view": "^6.0.0",
+ "@lezer/common": "^1.0.0"
+ }
+ },
+ "node_modules/@codemirror/commands": {
+ "version": "6.7.1",
+ "resolved": "https://registry.npmjs.org/@codemirror/commands/-/commands-6.7.1.tgz",
+ "integrity": "sha512-llTrboQYw5H4THfhN4U3qCnSZ1SOJ60ohhz+SzU0ADGtwlc533DtklQP0vSFaQuCPDn3BPpOd1GbbnUtwNjsrw==",
+ "dependencies": {
+ "@codemirror/language": "^6.0.0",
+ "@codemirror/state": "^6.4.0",
+ "@codemirror/view": "^6.27.0",
+ "@lezer/common": "^1.1.0"
+ }
+ },
+ "node_modules/@codemirror/lang-json": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/@codemirror/lang-json/-/lang-json-6.0.1.tgz",
+ "integrity": "sha512-+T1flHdgpqDDlJZ2Lkil/rLiRy684WMLc74xUnjJH48GQdfJo/pudlTRreZmKwzP8/tGdKf83wlbAdOCzlJOGQ==",
+ "dependencies": {
+ "@codemirror/language": "^6.0.0",
+ "@lezer/json": "^1.0.0"
+ }
+ },
+ "node_modules/@codemirror/language": {
+ "version": "6.10.3",
+ "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.10.3.tgz",
+ "integrity": "sha512-kDqEU5sCP55Oabl6E7m5N+vZRoc0iWqgDVhEKifcHzPzjqCegcO4amfrYVL9PmPZpl4G0yjkpTpUO/Ui8CzO8A==",
+ "dependencies": {
+ "@codemirror/state": "^6.0.0",
+ "@codemirror/view": "^6.23.0",
+ "@lezer/common": "^1.1.0",
+ "@lezer/highlight": "^1.0.0",
+ "@lezer/lr": "^1.0.0",
+ "style-mod": "^4.0.0"
+ }
+ },
+ "node_modules/@codemirror/lint": {
+ "version": "6.8.2",
+ "resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.8.2.tgz",
+ "integrity": "sha512-PDFG5DjHxSEjOXk9TQYYVjZDqlZTFaDBfhQixHnQOEVDDNHUbEh/hstAjcQJaA6FQdZTD1hquXTK0rVBLADR1g==",
+ "dependencies": {
+ "@codemirror/state": "^6.0.0",
+ "@codemirror/view": "^6.0.0",
+ "crelt": "^1.0.5"
+ }
+ },
+ "node_modules/@codemirror/search": {
+ "version": "6.5.7",
+ "resolved": "https://registry.npmjs.org/@codemirror/search/-/search-6.5.7.tgz",
+ "integrity": "sha512-6+iLsXvITWKHYlkgHPCs/qiX4dNzn8N78YfhOFvPtPYCkuXqZq10rAfsUMhOq7O/1VjJqdXRflyExlfVcu/9VQ==",
+ "dependencies": {
+ "@codemirror/state": "^6.0.0",
+ "@codemirror/view": "^6.0.0",
+ "crelt": "^1.0.5"
+ }
+ },
+ "node_modules/@codemirror/state": {
+ "version": "6.4.1",
+ "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-6.4.1.tgz",
+ "integrity": "sha512-QkEyUiLhsJoZkbumGZlswmAhA7CBU02Wrz7zvH4SrcifbsqwlXShVXg65f3v/ts57W3dqyamEriMhij1Z3Zz4A=="
+ },
+ "node_modules/@codemirror/theme-one-dark": {
+ "version": "6.1.2",
+ "resolved": "https://registry.npmjs.org/@codemirror/theme-one-dark/-/theme-one-dark-6.1.2.tgz",
+ "integrity": "sha512-F+sH0X16j/qFLMAfbciKTxVOwkdAS336b7AXTKOZhy8BR3eH/RelsnLgLFINrpST63mmN2OuwUt0W2ndUgYwUA==",
+ "dependencies": {
+ "@codemirror/language": "^6.0.0",
+ "@codemirror/state": "^6.0.0",
+ "@codemirror/view": "^6.0.0",
+ "@lezer/highlight": "^1.0.0"
+ }
+ },
+ "node_modules/@codemirror/view": {
+ "version": "6.34.2",
+ "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.34.2.tgz",
+ "integrity": "sha512-d6n0WFvL970A9Z+l9N2dO+Hk9ev4hDYQzIx+B9tCyBP0W5wPEszi1rhuyFesNSkLZzXbQE5FPH7F/z/TMJfoPA==",
+ "dependencies": {
+ "@codemirror/state": "^6.4.0",
+ "style-mod": "^4.1.0",
+ "w3c-keyname": "^2.2.4"
+ }
+ },
+ "node_modules/@esbuild/aix-ppc64": {
+ "version": "0.23.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.23.1.tgz",
+ "integrity": "sha512-6VhYk1diRqrhBAqpJEdjASR/+WVRtfjpqKuNw11cLiaWpAT/Uu+nokB+UJnevzy/P9C/ty6AOe0dwueMrGh/iQ==",
+ "cpu": [
+ "ppc64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "aix"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/android-arm": {
+ "version": "0.23.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.23.1.tgz",
+ "integrity": "sha512-uz6/tEy2IFm9RYOyvKl88zdzZfwEfKZmnX9Cj1BHjeSGNuGLuMD1kR8y5bteYmwqKm1tj8m4cb/aKEorr6fHWQ==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/android-arm64": {
+ "version": "0.23.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.23.1.tgz",
+ "integrity": "sha512-xw50ipykXcLstLeWH7WRdQuysJqejuAGPd30vd1i5zSyKK3WE+ijzHmLKxdiCMtH1pHz78rOg0BKSYOSB/2Khw==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/android-x64": {
+ "version": "0.23.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.23.1.tgz",
+ "integrity": "sha512-nlN9B69St9BwUoB+jkyU090bru8L0NA3yFvAd7k8dNsVH8bi9a8cUAUSEcEEgTp2z3dbEDGJGfP6VUnkQnlReg==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/darwin-arm64": {
+ "version": "0.23.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.23.1.tgz",
+ "integrity": "sha512-YsS2e3Wtgnw7Wq53XXBLcV6JhRsEq8hkfg91ESVadIrzr9wO6jJDMZnCQbHm1Guc5t/CdDiFSSfWP58FNuvT3Q==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/darwin-x64": {
+ "version": "0.23.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.23.1.tgz",
+ "integrity": "sha512-aClqdgTDVPSEGgoCS8QDG37Gu8yc9lTHNAQlsztQ6ENetKEO//b8y31MMu2ZaPbn4kVsIABzVLXYLhCGekGDqw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/freebsd-arm64": {
+ "version": "0.23.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.23.1.tgz",
+ "integrity": "sha512-h1k6yS8/pN/NHlMl5+v4XPfikhJulk4G+tKGFIOwURBSFzE8bixw1ebjluLOjfwtLqY0kewfjLSrO6tN2MgIhA==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "freebsd"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/freebsd-x64": {
+ "version": "0.23.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.23.1.tgz",
+ "integrity": "sha512-lK1eJeyk1ZX8UklqFd/3A60UuZ/6UVfGT2LuGo3Wp4/z7eRTRYY+0xOu2kpClP+vMTi9wKOfXi2vjUpO1Ro76g==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "freebsd"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-arm": {
+ "version": "0.23.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.23.1.tgz",
+ "integrity": "sha512-CXXkzgn+dXAPs3WBwE+Kvnrf4WECwBdfjfeYHpMeVxWE0EceB6vhWGShs6wi0IYEqMSIzdOF1XjQ/Mkm5d7ZdQ==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-arm64": {
+ "version": "0.23.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.23.1.tgz",
+ "integrity": "sha512-/93bf2yxencYDnItMYV/v116zff6UyTjo4EtEQjUBeGiVpMmffDNUyD9UN2zV+V3LRV3/on4xdZ26NKzn6754g==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-ia32": {
+ "version": "0.23.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.23.1.tgz",
+ "integrity": "sha512-VTN4EuOHwXEkXzX5nTvVY4s7E/Krz7COC8xkftbbKRYAl96vPiUssGkeMELQMOnLOJ8k3BY1+ZY52tttZnHcXQ==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-loong64": {
+ "version": "0.23.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.23.1.tgz",
+ "integrity": "sha512-Vx09LzEoBa5zDnieH8LSMRToj7ir/Jeq0Gu6qJ/1GcBq9GkfoEAoXvLiW1U9J1qE/Y/Oyaq33w5p2ZWrNNHNEw==",
+ "cpu": [
+ "loong64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-mips64el": {
+ "version": "0.23.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.23.1.tgz",
+ "integrity": "sha512-nrFzzMQ7W4WRLNUOU5dlWAqa6yVeI0P78WKGUo7lg2HShq/yx+UYkeNSE0SSfSure0SqgnsxPvmAUu/vu0E+3Q==",
+ "cpu": [
+ "mips64el"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-ppc64": {
+ "version": "0.23.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.23.1.tgz",
+ "integrity": "sha512-dKN8fgVqd0vUIjxuJI6P/9SSSe/mB9rvA98CSH2sJnlZ/OCZWO1DJvxj8jvKTfYUdGfcq2dDxoKaC6bHuTlgcw==",
+ "cpu": [
+ "ppc64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-riscv64": {
+ "version": "0.23.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.23.1.tgz",
+ "integrity": "sha512-5AV4Pzp80fhHL83JM6LoA6pTQVWgB1HovMBsLQ9OZWLDqVY8MVobBXNSmAJi//Csh6tcY7e7Lny2Hg1tElMjIA==",
+ "cpu": [
+ "riscv64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-s390x": {
+ "version": "0.23.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.23.1.tgz",
+ "integrity": "sha512-9ygs73tuFCe6f6m/Tb+9LtYxWR4c9yg7zjt2cYkjDbDpV/xVn+68cQxMXCjUpYwEkze2RcU/rMnfIXNRFmSoDw==",
+ "cpu": [
+ "s390x"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-x64": {
+ "version": "0.23.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.23.1.tgz",
+ "integrity": "sha512-EV6+ovTsEXCPAp58g2dD68LxoP/wK5pRvgy0J/HxPGB009omFPv3Yet0HiaqvrIrgPTBuC6wCH1LTOY91EO5hQ==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/netbsd-x64": {
+ "version": "0.23.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.23.1.tgz",
+ "integrity": "sha512-aevEkCNu7KlPRpYLjwmdcuNz6bDFiE7Z8XC4CPqExjTvrHugh28QzUXVOZtiYghciKUacNktqxdpymplil1beA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "netbsd"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/openbsd-arm64": {
+ "version": "0.23.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.23.1.tgz",
+ "integrity": "sha512-3x37szhLexNA4bXhLrCC/LImN/YtWis6WXr1VESlfVtVeoFJBRINPJ3f0a/6LV8zpikqoUg4hyXw0sFBt5Cr+Q==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "openbsd"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/openbsd-x64": {
+ "version": "0.23.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.23.1.tgz",
+ "integrity": "sha512-aY2gMmKmPhxfU+0EdnN+XNtGbjfQgwZj43k8G3fyrDM/UdZww6xrWxmDkuz2eCZchqVeABjV5BpildOrUbBTqA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "openbsd"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/sunos-x64": {
+ "version": "0.23.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.23.1.tgz",
+ "integrity": "sha512-RBRT2gqEl0IKQABT4XTj78tpk9v7ehp+mazn2HbUeZl1YMdaGAQqhapjGTCe7uw7y0frDi4gS0uHzhvpFuI1sA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "sunos"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/win32-arm64": {
+ "version": "0.23.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.23.1.tgz",
+ "integrity": "sha512-4O+gPR5rEBe2FpKOVyiJ7wNDPA8nGzDuJ6gN4okSA1gEOYZ67N8JPk58tkWtdtPeLz7lBnY6I5L3jdsr3S+A6A==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/win32-ia32": {
+ "version": "0.23.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.23.1.tgz",
+ "integrity": "sha512-BcaL0Vn6QwCwre3Y717nVHZbAa4UBEigzFm6VdsVdT/MbZ38xoj1X9HPkZhbmaBGUD1W8vxAfffbDe8bA6AKnQ==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/win32-x64": {
+ "version": "0.23.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.23.1.tgz",
+ "integrity": "sha512-BHpFFeslkWrXWyUPnbKm+xYYVYruCinGcftSBaa8zoF9hZO4BcSCFUvHVTtzpIY6YzUnYtuEhZ+C9iEXjxnasg==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@eslint-community/eslint-utils": {
+ "version": "4.4.1",
+ "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.4.1.tgz",
+ "integrity": "sha512-s3O3waFUrMV8P/XaF/+ZTp1X9XBZW1a4B97ZnjQF2KYWaFD2A8KyFBsrsfSjEmjn3RGWAIuvlneuZm3CUK3jbA==",
+ "dev": true,
+ "dependencies": {
+ "eslint-visitor-keys": "^3.4.3"
+ },
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/eslint"
+ },
+ "peerDependencies": {
+ "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0"
+ }
+ },
+ "node_modules/@eslint-community/eslint-utils/node_modules/eslint-visitor-keys": {
+ "version": "3.4.3",
+ "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz",
+ "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==",
+ "dev": true,
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/eslint"
+ }
+ },
+ "node_modules/@eslint-community/regexpp": {
+ "version": "4.12.1",
+ "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.1.tgz",
+ "integrity": "sha512-CCZCDJuduB9OUkFkY2IgppNZMi2lBQgD2qzwXkEia16cge2pijY/aXi96CJMquDMn3nJdlPV1A5KrJEXwfLNzQ==",
+ "dev": true,
+ "engines": {
+ "node": "^12.0.0 || ^14.0.0 || >=16.0.0"
+ }
+ },
+ "node_modules/@eslint/config-array": {
+ "version": "0.18.0",
+ "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.18.0.tgz",
+ "integrity": "sha512-fTxvnS1sRMu3+JjXwJG0j/i4RT9u4qJ+lqS/yCGap4lH4zZGzQ7tu+xZqQmcMZq5OBZDL4QRxQzRjkWcGt8IVw==",
+ "dev": true,
+ "dependencies": {
+ "@eslint/object-schema": "^2.1.4",
+ "debug": "^4.3.1",
+ "minimatch": "^3.1.2"
+ },
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ }
+ },
+ "node_modules/@eslint/core": {
+ "version": "0.7.0",
+ "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.7.0.tgz",
+ "integrity": "sha512-xp5Jirz5DyPYlPiKat8jaq0EmYvDXKKpzTbxXMpT9eqlRJkRKIz9AGMdlvYjih+im+QlhWrpvVjl8IPC/lHlUw==",
+ "dev": true,
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ }
+ },
+ "node_modules/@eslint/eslintrc": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.1.0.tgz",
+ "integrity": "sha512-4Bfj15dVJdoy3RfZmmo86RK1Fwzn6SstsvK9JS+BaVKqC6QQQQyXekNaC+g+LKNgkQ+2VhGAzm6hO40AhMR3zQ==",
+ "dev": true,
+ "dependencies": {
+ "ajv": "^6.12.4",
+ "debug": "^4.3.2",
+ "espree": "^10.0.1",
+ "globals": "^14.0.0",
+ "ignore": "^5.2.0",
+ "import-fresh": "^3.2.1",
+ "js-yaml": "^4.1.0",
+ "minimatch": "^3.1.2",
+ "strip-json-comments": "^3.1.1"
+ },
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/eslint"
+ }
+ },
+ "node_modules/@eslint/eslintrc/node_modules/globals": {
+ "version": "14.0.0",
+ "resolved": "https://registry.npmjs.org/globals/-/globals-14.0.0.tgz",
+ "integrity": "sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/@eslint/js": {
+ "version": "9.13.0",
+ "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.13.0.tgz",
+ "integrity": "sha512-IFLyoY4d72Z5y/6o/BazFBezupzI/taV8sGumxTAVw3lXG9A6md1Dc34T9s1FoD/an9pJH8RHbAxsaEbBed9lA==",
+ "dev": true,
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ }
+ },
+ "node_modules/@eslint/object-schema": {
+ "version": "2.1.4",
+ "resolved": "https://registry.npmjs.org/@eslint/object-schema/-/object-schema-2.1.4.tgz",
+ "integrity": "sha512-BsWiH1yFGjXXS2yvrf5LyuoSIIbPrGUWob917o+BTKuZ7qJdxX8aJLRxs1fS9n6r7vESrq1OUqb68dANcFXuQQ==",
+ "dev": true,
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ }
+ },
+ "node_modules/@eslint/plugin-kit": {
+ "version": "0.2.2",
+ "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.2.2.tgz",
+ "integrity": "sha512-CXtq5nR4Su+2I47WPOlWud98Y5Lv8Kyxp2ukhgFx/eW6Blm18VXJO5WuQylPugRo8nbluoi6GvvxBLqHcvqUUw==",
+ "dev": true,
+ "dependencies": {
+ "levn": "^0.4.1"
+ },
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ }
+ },
+ "node_modules/@floating-ui/core": {
+ "version": "1.6.8",
+ "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.6.8.tgz",
+ "integrity": "sha512-7XJ9cPU+yI2QeLS+FCSlqNFZJq8arvswefkZrYI1yQBbftw6FyrZOxYSh+9S7z7TpeWlRt9zJ5IhM1WIL334jA==",
+ "dependencies": {
+ "@floating-ui/utils": "^0.2.8"
+ }
+ },
+ "node_modules/@floating-ui/dom": {
+ "version": "1.6.12",
+ "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.6.12.tgz",
+ "integrity": "sha512-NP83c0HjokcGVEMeoStg317VD9W7eDlGK7457dMBANbKA6GJZdc7rjujdgqzTaz93jkGgc5P/jeWbaCHnMNc+w==",
+ "dependencies": {
+ "@floating-ui/core": "^1.6.0",
+ "@floating-ui/utils": "^0.2.8"
+ }
+ },
+ "node_modules/@floating-ui/react-dom": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-2.1.2.tgz",
+ "integrity": "sha512-06okr5cgPzMNBy+Ycse2A6udMi4bqwW/zgBF/rwjcNqWkyr82Mcg8b0vjX8OJpZFy/FKjJmw6wV7t44kK6kW7A==",
+ "dependencies": {
+ "@floating-ui/dom": "^1.0.0"
+ },
+ "peerDependencies": {
+ "react": ">=16.8.0",
+ "react-dom": ">=16.8.0"
+ }
+ },
+ "node_modules/@floating-ui/utils": {
+ "version": "0.2.8",
+ "resolved": "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.2.8.tgz",
+ "integrity": "sha512-kym7SodPp8/wloecOpcmSnWJsK7M0E5Wg8UcFA+uO4B9s5d0ywXOEro/8HM9x0rW+TljRzul/14UYz3TleT3ig=="
+ },
+ "node_modules/@hookform/resolvers": {
+ "version": "3.9.1",
+ "resolved": "https://registry.npmjs.org/@hookform/resolvers/-/resolvers-3.9.1.tgz",
+ "integrity": "sha512-ud2HqmGBM0P0IABqoskKWI6PEf6ZDDBZkFqe2Vnl+mTHCEHzr3ISjjZyCwTjC/qpL25JC9aIDkloQejvMeq0ug==",
+ "peerDependencies": {
+ "react-hook-form": "^7.0.0"
+ }
+ },
+ "node_modules/@humanfs/core": {
+ "version": "0.19.1",
+ "resolved": "https://registry.npmjs.org/@humanfs/core/-/core-0.19.1.tgz",
+ "integrity": "sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA==",
+ "dev": true,
+ "engines": {
+ "node": ">=18.18.0"
+ }
+ },
+ "node_modules/@humanfs/node": {
+ "version": "0.16.6",
+ "resolved": "https://registry.npmjs.org/@humanfs/node/-/node-0.16.6.tgz",
+ "integrity": "sha512-YuI2ZHQL78Q5HbhDiBA1X4LmYdXCKCMQIfw0pw7piHJwyREFebJUvrQN4cMssyES6x+vfUbx1CIpaQUKYdQZOw==",
+ "dev": true,
+ "dependencies": {
+ "@humanfs/core": "^0.19.1",
+ "@humanwhocodes/retry": "^0.3.0"
+ },
+ "engines": {
+ "node": ">=18.18.0"
+ }
+ },
+ "node_modules/@humanwhocodes/module-importer": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz",
+ "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==",
+ "dev": true,
+ "engines": {
+ "node": ">=12.22"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/nzakas"
+ }
+ },
+ "node_modules/@humanwhocodes/retry": {
+ "version": "0.3.1",
+ "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.3.1.tgz",
+ "integrity": "sha512-JBxkERygn7Bv/GbN5Rv8Ul6LVknS+5Bp6RgDC/O8gEBU/yeH5Ui5C/OlWrTb6qct7LjjfT6Re2NxB0ln0yYybA==",
+ "dev": true,
+ "engines": {
+ "node": ">=18.18"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/nzakas"
+ }
+ },
+ "node_modules/@inquirer/confirm": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/@inquirer/confirm/-/confirm-5.0.1.tgz",
+ "integrity": "sha512-6ycMm7k7NUApiMGfVc32yIPp28iPKxhGRMqoNDiUjq2RyTAkbs5Fx0TdzBqhabcKvniDdAAvHCmsRjnNfTsogw==",
+ "dev": true,
+ "dependencies": {
+ "@inquirer/core": "^10.0.1",
+ "@inquirer/type": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "peerDependencies": {
+ "@types/node": ">=18"
+ }
+ },
+ "node_modules/@inquirer/core": {
+ "version": "10.0.1",
+ "resolved": "https://registry.npmjs.org/@inquirer/core/-/core-10.0.1.tgz",
+ "integrity": "sha512-KKTgjViBQUi3AAssqjUFMnMO3CM3qwCHvePV9EW+zTKGKafFGFF01sc1yOIYjLJ7QU52G/FbzKc+c01WLzXmVQ==",
+ "dev": true,
+ "dependencies": {
+ "@inquirer/figures": "^1.0.7",
+ "@inquirer/type": "^3.0.0",
+ "ansi-escapes": "^4.3.2",
+ "cli-width": "^4.1.0",
+ "mute-stream": "^2.0.0",
+ "signal-exit": "^4.1.0",
+ "strip-ansi": "^6.0.1",
+ "wrap-ansi": "^6.2.0",
+ "yoctocolors-cjs": "^2.1.2"
+ },
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@inquirer/core/node_modules/ansi-regex": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
+ "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/@inquirer/core/node_modules/emoji-regex": {
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
+ "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
+ "dev": true
+ },
+ "node_modules/@inquirer/core/node_modules/string-width": {
+ "version": "4.2.3",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
+ "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
+ "dev": true,
+ "dependencies": {
+ "emoji-regex": "^8.0.0",
+ "is-fullwidth-code-point": "^3.0.0",
+ "strip-ansi": "^6.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/@inquirer/core/node_modules/strip-ansi": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
+ "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
+ "dev": true,
+ "dependencies": {
+ "ansi-regex": "^5.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/@inquirer/core/node_modules/wrap-ansi": {
+ "version": "6.2.0",
+ "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-6.2.0.tgz",
+ "integrity": "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==",
+ "dev": true,
+ "dependencies": {
+ "ansi-styles": "^4.0.0",
+ "string-width": "^4.1.0",
+ "strip-ansi": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/@inquirer/figures": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/@inquirer/figures/-/figures-1.0.7.tgz",
+ "integrity": "sha512-m+Trk77mp54Zma6xLkLuY+mvanPxlE4A7yNKs2HBiyZ4UkVs28Mv5c/pgWrHeInx+USHeX/WEPzjrWrcJiQgjw==",
+ "dev": true,
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@inquirer/type": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/@inquirer/type/-/type-3.0.0.tgz",
+ "integrity": "sha512-YYykfbw/lefC7yKj7nanzQXILM7r3suIvyFlCcMskc99axmsSewXWkAfXKwMbgxL76iAFVmRwmYdwNZNc8gjog==",
+ "dev": true,
+ "engines": {
+ "node": ">=18"
+ },
+ "peerDependencies": {
+ "@types/node": ">=18"
+ }
+ },
+ "node_modules/@isaacs/cliui": {
+ "version": "8.0.2",
+ "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz",
+ "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==",
+ "dependencies": {
+ "string-width": "^5.1.2",
+ "string-width-cjs": "npm:string-width@^4.2.0",
+ "strip-ansi": "^7.0.1",
+ "strip-ansi-cjs": "npm:strip-ansi@^6.0.1",
+ "wrap-ansi": "^8.1.0",
+ "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@joshwooding/vite-plugin-react-docgen-typescript": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/@joshwooding/vite-plugin-react-docgen-typescript/-/vite-plugin-react-docgen-typescript-0.3.0.tgz",
+ "integrity": "sha512-2D6y7fNvFmsLmRt6UCOFJPvFoPMJGT0Uh1Wg0RaigUp7kdQPs6yYn8Dmx6GZkOH/NW0yMTwRz/p0SRMMRo50vA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "glob": "^7.2.0",
+ "glob-promise": "^4.2.0",
+ "magic-string": "^0.27.0",
+ "react-docgen-typescript": "^2.2.2"
+ },
+ "peerDependencies": {
+ "typescript": ">= 4.3.x",
+ "vite": "^3.0.0 || ^4.0.0 || ^5.0.0"
+ },
+ "peerDependenciesMeta": {
+ "typescript": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@joshwooding/vite-plugin-react-docgen-typescript/node_modules/glob": {
+ "version": "7.2.3",
+ "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz",
+ "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==",
+ "deprecated": "Glob versions prior to v9 are no longer supported",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "fs.realpath": "^1.0.0",
+ "inflight": "^1.0.4",
+ "inherits": "2",
+ "minimatch": "^3.1.1",
+ "once": "^1.3.0",
+ "path-is-absolute": "^1.0.0"
+ },
+ "engines": {
+ "node": "*"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
+ }
+ },
+ "node_modules/@joshwooding/vite-plugin-react-docgen-typescript/node_modules/glob-promise": {
+ "version": "4.2.2",
+ "resolved": "https://registry.npmjs.org/glob-promise/-/glob-promise-4.2.2.tgz",
+ "integrity": "sha512-xcUzJ8NWN5bktoTIX7eOclO1Npxd/dyVqUJxlLIDasT4C7KZyqlPIwkdJ0Ypiy3p2ZKahTjK4M9uC3sNSfNMzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@types/glob": "^7.1.3"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "type": "individual",
+ "url": "https://github.com/sponsors/ahmadnassri"
+ },
+ "peerDependencies": {
+ "glob": "^7.1.6"
+ }
+ },
+ "node_modules/@joshwooding/vite-plugin-react-docgen-typescript/node_modules/magic-string": {
+ "version": "0.27.0",
+ "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.27.0.tgz",
+ "integrity": "sha512-8UnnX2PeRAPZuN12svgR9j7M1uWMovg/CEnIwIG0LFkXSJJe4PdfUGiTGl8V9bsBHFUtfVINcSyYxd7q+kx9fA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jridgewell/sourcemap-codec": "^1.4.13"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@jridgewell/gen-mapping": {
+ "version": "0.3.5",
+ "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.5.tgz",
+ "integrity": "sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg==",
+ "dependencies": {
+ "@jridgewell/set-array": "^1.2.1",
+ "@jridgewell/sourcemap-codec": "^1.4.10",
+ "@jridgewell/trace-mapping": "^0.3.24"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@jridgewell/resolve-uri": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz",
+ "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==",
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@jridgewell/set-array": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz",
+ "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==",
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@jridgewell/sourcemap-codec": {
+ "version": "1.5.0",
+ "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz",
+ "integrity": "sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ=="
+ },
+ "node_modules/@jridgewell/trace-mapping": {
+ "version": "0.3.25",
+ "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz",
+ "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==",
+ "dependencies": {
+ "@jridgewell/resolve-uri": "^3.1.0",
+ "@jridgewell/sourcemap-codec": "^1.4.14"
+ }
+ },
+ "node_modules/@lezer/common": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.2.3.tgz",
+ "integrity": "sha512-w7ojc8ejBqr2REPsWxJjrMFsA/ysDCFICn8zEOR9mrqzOu2amhITYuLD8ag6XZf0CFXDrhKqw7+tW8cX66NaDA=="
+ },
+ "node_modules/@lezer/highlight": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/@lezer/highlight/-/highlight-1.2.1.tgz",
+ "integrity": "sha512-Z5duk4RN/3zuVO7Jq0pGLJ3qynpxUVsh7IbUbGj88+uV2ApSAn6kWg2au3iJb+0Zi7kKtqffIESgNcRXWZWmSA==",
+ "dependencies": {
+ "@lezer/common": "^1.0.0"
+ }
+ },
+ "node_modules/@lezer/json": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/@lezer/json/-/json-1.0.2.tgz",
+ "integrity": "sha512-xHT2P4S5eeCYECyKNPhr4cbEL9tc8w83SPwRC373o9uEdrvGKTZoJVAGxpOsZckMlEh9W23Pc72ew918RWQOBQ==",
+ "dependencies": {
+ "@lezer/common": "^1.2.0",
+ "@lezer/highlight": "^1.0.0",
+ "@lezer/lr": "^1.0.0"
+ }
+ },
+ "node_modules/@lezer/lr": {
+ "version": "1.4.2",
+ "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.4.2.tgz",
+ "integrity": "sha512-pu0K1jCIdnQ12aWNaAVU5bzi7Bd1w54J3ECgANPmYLtQKP0HBj2cE/5coBD66MT10xbtIuUr7tg0Shbsvk0mDA==",
+ "dependencies": {
+ "@lezer/common": "^1.0.0"
+ }
+ },
+ "node_modules/@mdx-js/react": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/@mdx-js/react/-/react-3.1.0.tgz",
+ "integrity": "sha512-QjHtSaoameoalGnKDT3FoIl4+9RwyTmo9ZJGBdLOks/YOiWHoRDI3PUwEzOE7kEmGcV3AFcp9K6dYu9rEuKLAQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdx": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ },
+ "peerDependencies": {
+ "@types/react": ">=16",
+ "react": ">=16"
+ }
+ },
+ "node_modules/@mswjs/interceptors": {
+ "version": "0.36.10",
+ "resolved": "https://registry.npmjs.org/@mswjs/interceptors/-/interceptors-0.36.10.tgz",
+ "integrity": "sha512-GXrJgakgJW3DWKueebkvtYgGKkxA7s0u5B0P5syJM5rvQUnrpLPigvci8Hukl7yEM+sU06l+er2Fgvx/gmiRgg==",
+ "dev": true,
+ "dependencies": {
+ "@open-draft/deferred-promise": "^2.2.0",
+ "@open-draft/logger": "^0.3.0",
+ "@open-draft/until": "^2.0.0",
+ "is-node-process": "^1.2.0",
+ "outvariant": "^1.4.3",
+ "strict-event-emitter": "^0.5.1"
+ },
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@nodelib/fs.scandir": {
+ "version": "2.1.5",
+ "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz",
+ "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==",
+ "dependencies": {
+ "@nodelib/fs.stat": "2.0.5",
+ "run-parallel": "^1.1.9"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/@nodelib/fs.stat": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz",
+ "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==",
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/@nodelib/fs.walk": {
+ "version": "1.2.8",
+ "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz",
+ "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==",
+ "dependencies": {
+ "@nodelib/fs.scandir": "2.1.5",
+ "fastq": "^1.6.0"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/@open-draft/deferred-promise": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/@open-draft/deferred-promise/-/deferred-promise-2.2.0.tgz",
+ "integrity": "sha512-CecwLWx3rhxVQF6V4bAgPS5t+So2sTbPgAzafKkVizyi7tlwpcFpdFqq+wqF2OwNBmqFuu6tOyouTuxgpMfzmA==",
+ "dev": true
+ },
+ "node_modules/@open-draft/logger": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/@open-draft/logger/-/logger-0.3.0.tgz",
+ "integrity": "sha512-X2g45fzhxH238HKO4xbSr7+wBS8Fvw6ixhTDuvLd5mqh6bJJCFAPwU9mPDxbcrRtfxv4u5IHCEH77BmxvXmmxQ==",
+ "dev": true,
+ "dependencies": {
+ "is-node-process": "^1.2.0",
+ "outvariant": "^1.4.0"
+ }
+ },
+ "node_modules/@open-draft/until": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/@open-draft/until/-/until-2.1.0.tgz",
+ "integrity": "sha512-U69T3ItWHvLwGg5eJ0n3I62nWuE6ilHlmz7zM0npLBRvPRd7e6NYmg54vvRtP5mZG7kZqZCFVdsTWo7BPtBujg==",
+ "dev": true
+ },
+ "node_modules/@pkgjs/parseargs": {
+ "version": "0.11.0",
+ "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz",
+ "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==",
+ "optional": true,
+ "engines": {
+ "node": ">=14"
+ }
+ },
+ "node_modules/@radix-ui/number": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/@radix-ui/number/-/number-1.1.0.tgz",
+ "integrity": "sha512-V3gRzhVNU1ldS5XhAPTom1fOIo4ccrjjJgmE+LI2h/WaFpHmx0MQApT+KZHnx8abG6Avtfcz4WoEciMnpFT3HQ=="
+ },
+ "node_modules/@radix-ui/primitive": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.1.0.tgz",
+ "integrity": "sha512-4Z8dn6Upk0qk4P74xBhZ6Hd/w0mPEzOOLxy4xiPXOXqjF7jZS0VAKk7/x/H6FyY2zCkYJqePf1G5KmkmNJ4RBA=="
+ },
+ "node_modules/@radix-ui/react-arrow": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-arrow/-/react-arrow-1.1.0.tgz",
+ "integrity": "sha512-FmlW1rCg7hBpEBwFbjHwCW6AmWLQM6g/v0Sn8XbP9NvmSZ2San1FpQeyPtufzOMSIx7Y4dzjlHoifhp+7NkZhw==",
+ "dependencies": {
+ "@radix-ui/react-primitive": "2.0.0"
+ },
+ "peerDependencies": {
+ "@types/react": "*",
+ "@types/react-dom": "*",
+ "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
+ "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ },
+ "@types/react-dom": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@radix-ui/react-avatar": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-avatar/-/react-avatar-1.1.1.tgz",
+ "integrity": "sha512-eoOtThOmxeoizxpX6RiEsQZ2wj5r4+zoeqAwO0cBaFQGjJwIH3dIX0OCxNrCyrrdxG+vBweMETh3VziQG7c1kw==",
+ "dependencies": {
+ "@radix-ui/react-context": "1.1.1",
+ "@radix-ui/react-primitive": "2.0.0",
+ "@radix-ui/react-use-callback-ref": "1.1.0",
+ "@radix-ui/react-use-layout-effect": "1.1.0"
+ },
+ "peerDependencies": {
+ "@types/react": "*",
+ "@types/react-dom": "*",
+ "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
+ "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ },
+ "@types/react-dom": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@radix-ui/react-checkbox": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-checkbox/-/react-checkbox-1.1.2.tgz",
+ "integrity": "sha512-/i0fl686zaJbDQLNKrkCbMyDm6FQMt4jg323k7HuqitoANm9sE23Ql8yOK3Wusk34HSLKDChhMux05FnP6KUkw==",
+ "dependencies": {
+ "@radix-ui/primitive": "1.1.0",
+ "@radix-ui/react-compose-refs": "1.1.0",
+ "@radix-ui/react-context": "1.1.1",
+ "@radix-ui/react-presence": "1.1.1",
+ "@radix-ui/react-primitive": "2.0.0",
+ "@radix-ui/react-use-controllable-state": "1.1.0",
+ "@radix-ui/react-use-previous": "1.1.0",
+ "@radix-ui/react-use-size": "1.1.0"
+ },
+ "peerDependencies": {
+ "@types/react": "*",
+ "@types/react-dom": "*",
+ "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
+ "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ },
+ "@types/react-dom": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@radix-ui/react-collection": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-collection/-/react-collection-1.1.0.tgz",
+ "integrity": "sha512-GZsZslMJEyo1VKm5L1ZJY8tGDxZNPAoUeQUIbKeJfoi7Q4kmig5AsgLMYYuyYbfjd8fBmFORAIwYAkXMnXZgZw==",
+ "dependencies": {
+ "@radix-ui/react-compose-refs": "1.1.0",
+ "@radix-ui/react-context": "1.1.0",
+ "@radix-ui/react-primitive": "2.0.0",
+ "@radix-ui/react-slot": "1.1.0"
+ },
+ "peerDependencies": {
+ "@types/react": "*",
+ "@types/react-dom": "*",
+ "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
+ "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ },
+ "@types/react-dom": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@radix-ui/react-collection/node_modules/@radix-ui/react-context": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.0.tgz",
+ "integrity": "sha512-OKrckBy+sMEgYM/sMmqmErVn0kZqrHPJze+Ql3DzYsDDp0hl0L62nx/2122/Bvps1qz645jlcu2tD9lrRSdf8A==",
+ "peerDependencies": {
+ "@types/react": "*",
+ "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@radix-ui/react-compose-refs": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.1.0.tgz",
+ "integrity": "sha512-b4inOtiaOnYf9KWyO3jAeeCG6FeyfY6ldiEPanbUjWd+xIk5wZeHa8yVwmrJ2vderhu/BQvzCrJI0lHd+wIiqw==",
+ "peerDependencies": {
+ "@types/react": "*",
+ "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@radix-ui/react-context": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.1.tgz",
+ "integrity": "sha512-UASk9zi+crv9WteK/NU4PLvOoL3OuE6BWVKNF6hPRBtYBDXQ2u5iu3O59zUlJiTVvkyuycnqrztsHVJwcK9K+Q==",
+ "peerDependencies": {
+ "@types/react": "*",
+ "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@radix-ui/react-dialog": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-dialog/-/react-dialog-1.1.2.tgz",
+ "integrity": "sha512-Yj4dZtqa2o+kG61fzB0H2qUvmwBA2oyQroGLyNtBj1beo1khoQ3q1a2AO8rrQYjd8256CO9+N8L9tvsS+bnIyA==",
+ "dependencies": {
+ "@radix-ui/primitive": "1.1.0",
+ "@radix-ui/react-compose-refs": "1.1.0",
+ "@radix-ui/react-context": "1.1.1",
+ "@radix-ui/react-dismissable-layer": "1.1.1",
+ "@radix-ui/react-focus-guards": "1.1.1",
+ "@radix-ui/react-focus-scope": "1.1.0",
+ "@radix-ui/react-id": "1.1.0",
+ "@radix-ui/react-portal": "1.1.2",
+ "@radix-ui/react-presence": "1.1.1",
+ "@radix-ui/react-primitive": "2.0.0",
+ "@radix-ui/react-slot": "1.1.0",
+ "@radix-ui/react-use-controllable-state": "1.1.0",
+ "aria-hidden": "^1.1.1",
+ "react-remove-scroll": "2.6.0"
+ },
+ "peerDependencies": {
+ "@types/react": "*",
+ "@types/react-dom": "*",
+ "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
+ "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ },
+ "@types/react-dom": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@radix-ui/react-direction": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-direction/-/react-direction-1.1.0.tgz",
+ "integrity": "sha512-BUuBvgThEiAXh2DWu93XsT+a3aWrGqolGlqqw5VU1kG7p/ZH2cuDlM1sRLNnY3QcBS69UIz2mcKhMxDsdewhjg==",
+ "peerDependencies": {
+ "@types/react": "*",
+ "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@radix-ui/react-dismissable-layer": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.1.1.tgz",
+ "integrity": "sha512-QSxg29lfr/xcev6kSz7MAlmDnzbP1eI/Dwn3Tp1ip0KT5CUELsxkekFEMVBEoykI3oV39hKT4TKZzBNMbcTZYQ==",
+ "dependencies": {
+ "@radix-ui/primitive": "1.1.0",
+ "@radix-ui/react-compose-refs": "1.1.0",
+ "@radix-ui/react-primitive": "2.0.0",
+ "@radix-ui/react-use-callback-ref": "1.1.0",
+ "@radix-ui/react-use-escape-keydown": "1.1.0"
+ },
+ "peerDependencies": {
+ "@types/react": "*",
+ "@types/react-dom": "*",
+ "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
+ "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ },
+ "@types/react-dom": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@radix-ui/react-dropdown-menu": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-dropdown-menu/-/react-dropdown-menu-2.1.2.tgz",
+ "integrity": "sha512-GVZMR+eqK8/Kes0a36Qrv+i20bAPXSn8rCBTHx30w+3ECnR5o3xixAlqcVaYvLeyKUsm0aqyhWfmUcqufM8nYA==",
+ "dependencies": {
+ "@radix-ui/primitive": "1.1.0",
+ "@radix-ui/react-compose-refs": "1.1.0",
+ "@radix-ui/react-context": "1.1.1",
+ "@radix-ui/react-id": "1.1.0",
+ "@radix-ui/react-menu": "2.1.2",
+ "@radix-ui/react-primitive": "2.0.0",
+ "@radix-ui/react-use-controllable-state": "1.1.0"
+ },
+ "peerDependencies": {
+ "@types/react": "*",
+ "@types/react-dom": "*",
+ "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
+ "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ },
+ "@types/react-dom": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@radix-ui/react-focus-guards": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-guards/-/react-focus-guards-1.1.1.tgz",
+ "integrity": "sha512-pSIwfrT1a6sIoDASCSpFwOasEwKTZWDw/iBdtnqKO7v6FeOzYJ7U53cPzYFVR3geGGXgVHaH+CdngrrAzqUGxg==",
+ "peerDependencies": {
+ "@types/react": "*",
+ "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@radix-ui/react-focus-scope": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.1.0.tgz",
+ "integrity": "sha512-200UD8zylvEyL8Bx+z76RJnASR2gRMuxlgFCPAe/Q/679a/r0eK3MBVYMb7vZODZcffZBdob1EGnky78xmVvcA==",
+ "dependencies": {
+ "@radix-ui/react-compose-refs": "1.1.0",
+ "@radix-ui/react-primitive": "2.0.0",
+ "@radix-ui/react-use-callback-ref": "1.1.0"
+ },
+ "peerDependencies": {
+ "@types/react": "*",
+ "@types/react-dom": "*",
+ "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
+ "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ },
+ "@types/react-dom": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@radix-ui/react-hover-card": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-hover-card/-/react-hover-card-1.1.2.tgz",
+ "integrity": "sha512-Y5w0qGhysvmqsIy6nQxaPa6mXNKznfoGjOfBgzOjocLxr2XlSjqBMYQQL+FfyogsMuX+m8cZyQGYhJxvxUzO4w==",
+ "dependencies": {
+ "@radix-ui/primitive": "1.1.0",
+ "@radix-ui/react-compose-refs": "1.1.0",
+ "@radix-ui/react-context": "1.1.1",
+ "@radix-ui/react-dismissable-layer": "1.1.1",
+ "@radix-ui/react-popper": "1.2.0",
+ "@radix-ui/react-portal": "1.1.2",
+ "@radix-ui/react-presence": "1.1.1",
+ "@radix-ui/react-primitive": "2.0.0",
+ "@radix-ui/react-use-controllable-state": "1.1.0"
+ },
+ "peerDependencies": {
+ "@types/react": "*",
+ "@types/react-dom": "*",
+ "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
+ "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ },
+ "@types/react-dom": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@radix-ui/react-icons": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-icons/-/react-icons-1.3.1.tgz",
+ "integrity": "sha512-QvYompk0X+8Yjlo/Fv4McrzxohDdM5GgLHyQcPpcsPvlOSXCGFjdbuyGL5dzRbg0GpknAjQJJZzdiRK7iWVuFQ==",
+ "peerDependencies": {
+ "react": "^16.x || ^17.x || ^18.x || ^19.x"
+ }
+ },
+ "node_modules/@radix-ui/react-id": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-id/-/react-id-1.1.0.tgz",
+ "integrity": "sha512-EJUrI8yYh7WOjNOqpoJaf1jlFIH2LvtgAl+YcFqNCa+4hj64ZXmPkAKOFs/ukjz3byN6bdb/AVUqHkI8/uWWMA==",
+ "dependencies": {
+ "@radix-ui/react-use-layout-effect": "1.1.0"
+ },
+ "peerDependencies": {
+ "@types/react": "*",
+ "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@radix-ui/react-label": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-label/-/react-label-2.1.0.tgz",
+ "integrity": "sha512-peLblDlFw/ngk3UWq0VnYaOLy6agTZZ+MUO/WhVfm14vJGML+xH4FAl2XQGLqdefjNb7ApRg6Yn7U42ZhmYXdw==",
+ "dependencies": {
+ "@radix-ui/react-primitive": "2.0.0"
+ },
+ "peerDependencies": {
+ "@types/react": "*",
+ "@types/react-dom": "*",
+ "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
+ "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ },
+ "@types/react-dom": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@radix-ui/react-menu": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-menu/-/react-menu-2.1.2.tgz",
+ "integrity": "sha512-lZ0R4qR2Al6fZ4yCCZzu/ReTFrylHFxIqy7OezIpWF4bL0o9biKo0pFIvkaew3TyZ9Fy5gYVrR5zCGZBVbO1zg==",
+ "dependencies": {
+ "@radix-ui/primitive": "1.1.0",
+ "@radix-ui/react-collection": "1.1.0",
+ "@radix-ui/react-compose-refs": "1.1.0",
+ "@radix-ui/react-context": "1.1.1",
+ "@radix-ui/react-direction": "1.1.0",
+ "@radix-ui/react-dismissable-layer": "1.1.1",
+ "@radix-ui/react-focus-guards": "1.1.1",
+ "@radix-ui/react-focus-scope": "1.1.0",
+ "@radix-ui/react-id": "1.1.0",
+ "@radix-ui/react-popper": "1.2.0",
+ "@radix-ui/react-portal": "1.1.2",
+ "@radix-ui/react-presence": "1.1.1",
+ "@radix-ui/react-primitive": "2.0.0",
+ "@radix-ui/react-roving-focus": "1.1.0",
+ "@radix-ui/react-slot": "1.1.0",
+ "@radix-ui/react-use-callback-ref": "1.1.0",
+ "aria-hidden": "^1.1.1",
+ "react-remove-scroll": "2.6.0"
+ },
+ "peerDependencies": {
+ "@types/react": "*",
+ "@types/react-dom": "*",
+ "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
+ "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ },
+ "@types/react-dom": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@radix-ui/react-popover": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-popover/-/react-popover-1.1.2.tgz",
+ "integrity": "sha512-u2HRUyWW+lOiA2g0Le0tMmT55FGOEWHwPFt1EPfbLly7uXQExFo5duNKqG2DzmFXIdqOeNd+TpE8baHWJCyP9w==",
+ "dependencies": {
+ "@radix-ui/primitive": "1.1.0",
+ "@radix-ui/react-compose-refs": "1.1.0",
+ "@radix-ui/react-context": "1.1.1",
+ "@radix-ui/react-dismissable-layer": "1.1.1",
+ "@radix-ui/react-focus-guards": "1.1.1",
+ "@radix-ui/react-focus-scope": "1.1.0",
+ "@radix-ui/react-id": "1.1.0",
+ "@radix-ui/react-popper": "1.2.0",
+ "@radix-ui/react-portal": "1.1.2",
+ "@radix-ui/react-presence": "1.1.1",
+ "@radix-ui/react-primitive": "2.0.0",
+ "@radix-ui/react-slot": "1.1.0",
+ "@radix-ui/react-use-controllable-state": "1.1.0",
+ "aria-hidden": "^1.1.1",
+ "react-remove-scroll": "2.6.0"
+ },
+ "peerDependencies": {
+ "@types/react": "*",
+ "@types/react-dom": "*",
+ "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
+ "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ },
+ "@types/react-dom": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@radix-ui/react-popper": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-popper/-/react-popper-1.2.0.tgz",
+ "integrity": "sha512-ZnRMshKF43aBxVWPWvbj21+7TQCvhuULWJ4gNIKYpRlQt5xGRhLx66tMp8pya2UkGHTSlhpXwmjqltDYHhw7Vg==",
+ "dependencies": {
+ "@floating-ui/react-dom": "^2.0.0",
+ "@radix-ui/react-arrow": "1.1.0",
+ "@radix-ui/react-compose-refs": "1.1.0",
+ "@radix-ui/react-context": "1.1.0",
+ "@radix-ui/react-primitive": "2.0.0",
+ "@radix-ui/react-use-callback-ref": "1.1.0",
+ "@radix-ui/react-use-layout-effect": "1.1.0",
+ "@radix-ui/react-use-rect": "1.1.0",
+ "@radix-ui/react-use-size": "1.1.0",
+ "@radix-ui/rect": "1.1.0"
+ },
+ "peerDependencies": {
+ "@types/react": "*",
+ "@types/react-dom": "*",
+ "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
+ "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ },
+ "@types/react-dom": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@radix-ui/react-popper/node_modules/@radix-ui/react-context": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.0.tgz",
+ "integrity": "sha512-OKrckBy+sMEgYM/sMmqmErVn0kZqrHPJze+Ql3DzYsDDp0hl0L62nx/2122/Bvps1qz645jlcu2tD9lrRSdf8A==",
+ "peerDependencies": {
+ "@types/react": "*",
+ "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@radix-ui/react-portal": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.2.tgz",
+ "integrity": "sha512-WeDYLGPxJb/5EGBoedyJbT0MpoULmwnIPMJMSldkuiMsBAv7N1cRdsTWZWht9vpPOiN3qyiGAtbK2is47/uMFg==",
+ "dependencies": {
+ "@radix-ui/react-primitive": "2.0.0",
+ "@radix-ui/react-use-layout-effect": "1.1.0"
+ },
+ "peerDependencies": {
+ "@types/react": "*",
+ "@types/react-dom": "*",
+ "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
+ "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ },
+ "@types/react-dom": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@radix-ui/react-presence": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.1.1.tgz",
+ "integrity": "sha512-IeFXVi4YS1K0wVZzXNrbaaUvIJ3qdY+/Ih4eHFhWA9SwGR9UDX7Ck8abvL57C4cv3wwMvUE0OG69Qc3NCcTe/A==",
+ "dependencies": {
+ "@radix-ui/react-compose-refs": "1.1.0",
+ "@radix-ui/react-use-layout-effect": "1.1.0"
+ },
+ "peerDependencies": {
+ "@types/react": "*",
+ "@types/react-dom": "*",
+ "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
+ "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ },
+ "@types/react-dom": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@radix-ui/react-primitive": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.0.0.tgz",
+ "integrity": "sha512-ZSpFm0/uHa8zTvKBDjLFWLo8dkr4MBsiDLz0g3gMUwqgLHz9rTaRRGYDgvZPtBJgYCBKXkS9fzmoySgr8CO6Cw==",
+ "dependencies": {
+ "@radix-ui/react-slot": "1.1.0"
+ },
+ "peerDependencies": {
+ "@types/react": "*",
+ "@types/react-dom": "*",
+ "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
+ "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ },
+ "@types/react-dom": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@radix-ui/react-roving-focus": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-roving-focus/-/react-roving-focus-1.1.0.tgz",
+ "integrity": "sha512-EA6AMGeq9AEeQDeSH0aZgG198qkfHSbvWTf1HvoDmOB5bBG/qTxjYMWUKMnYiV6J/iP/J8MEFSuB2zRU2n7ODA==",
+ "dependencies": {
+ "@radix-ui/primitive": "1.1.0",
+ "@radix-ui/react-collection": "1.1.0",
+ "@radix-ui/react-compose-refs": "1.1.0",
+ "@radix-ui/react-context": "1.1.0",
+ "@radix-ui/react-direction": "1.1.0",
+ "@radix-ui/react-id": "1.1.0",
+ "@radix-ui/react-primitive": "2.0.0",
+ "@radix-ui/react-use-callback-ref": "1.1.0",
+ "@radix-ui/react-use-controllable-state": "1.1.0"
+ },
+ "peerDependencies": {
+ "@types/react": "*",
+ "@types/react-dom": "*",
+ "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
+ "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ },
+ "@types/react-dom": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@radix-ui/react-roving-focus/node_modules/@radix-ui/react-context": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.0.tgz",
+ "integrity": "sha512-OKrckBy+sMEgYM/sMmqmErVn0kZqrHPJze+Ql3DzYsDDp0hl0L62nx/2122/Bvps1qz645jlcu2tD9lrRSdf8A==",
+ "peerDependencies": {
+ "@types/react": "*",
+ "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@radix-ui/react-scroll-area": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-scroll-area/-/react-scroll-area-1.2.0.tgz",
+ "integrity": "sha512-q2jMBdsJ9zB7QG6ngQNzNwlvxLQqONyL58QbEGwuyRZZb/ARQwk3uQVbCF7GvQVOtV6EU/pDxAw3zRzJZI3rpQ==",
+ "dependencies": {
+ "@radix-ui/number": "1.1.0",
+ "@radix-ui/primitive": "1.1.0",
+ "@radix-ui/react-compose-refs": "1.1.0",
+ "@radix-ui/react-context": "1.1.1",
+ "@radix-ui/react-direction": "1.1.0",
+ "@radix-ui/react-presence": "1.1.1",
+ "@radix-ui/react-primitive": "2.0.0",
+ "@radix-ui/react-use-callback-ref": "1.1.0",
+ "@radix-ui/react-use-layout-effect": "1.1.0"
+ },
+ "peerDependencies": {
+ "@types/react": "*",
+ "@types/react-dom": "*",
+ "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
+ "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ },
+ "@types/react-dom": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@radix-ui/react-select": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-select/-/react-select-2.1.2.tgz",
+ "integrity": "sha512-rZJtWmorC7dFRi0owDmoijm6nSJH1tVw64QGiNIZ9PNLyBDtG+iAq+XGsya052At4BfarzY/Dhv9wrrUr6IMZA==",
+ "dependencies": {
+ "@radix-ui/number": "1.1.0",
+ "@radix-ui/primitive": "1.1.0",
+ "@radix-ui/react-collection": "1.1.0",
+ "@radix-ui/react-compose-refs": "1.1.0",
+ "@radix-ui/react-context": "1.1.1",
+ "@radix-ui/react-direction": "1.1.0",
+ "@radix-ui/react-dismissable-layer": "1.1.1",
+ "@radix-ui/react-focus-guards": "1.1.1",
+ "@radix-ui/react-focus-scope": "1.1.0",
+ "@radix-ui/react-id": "1.1.0",
+ "@radix-ui/react-popper": "1.2.0",
+ "@radix-ui/react-portal": "1.1.2",
+ "@radix-ui/react-primitive": "2.0.0",
+ "@radix-ui/react-slot": "1.1.0",
+ "@radix-ui/react-use-callback-ref": "1.1.0",
+ "@radix-ui/react-use-controllable-state": "1.1.0",
+ "@radix-ui/react-use-layout-effect": "1.1.0",
+ "@radix-ui/react-use-previous": "1.1.0",
+ "@radix-ui/react-visually-hidden": "1.1.0",
+ "aria-hidden": "^1.1.1",
+ "react-remove-scroll": "2.6.0"
+ },
+ "peerDependencies": {
+ "@types/react": "*",
+ "@types/react-dom": "*",
+ "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
+ "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ },
+ "@types/react-dom": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@radix-ui/react-separator": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-separator/-/react-separator-1.1.0.tgz",
+ "integrity": "sha512-3uBAs+egzvJBDZAzvb/n4NxxOYpnspmWxO2u5NbZ8Y6FM/NdrGSF9bop3Cf6F6C71z1rTSn8KV0Fo2ZVd79lGA==",
+ "dependencies": {
+ "@radix-ui/react-primitive": "2.0.0"
+ },
+ "peerDependencies": {
+ "@types/react": "*",
+ "@types/react-dom": "*",
+ "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
+ "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ },
+ "@types/react-dom": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@radix-ui/react-slot": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.0.tgz",
+ "integrity": "sha512-FUCf5XMfmW4dtYl69pdS4DbxKy8nj4M7SafBgPllysxmdachynNflAdp/gCsnYWNDnge6tI9onzMp5ARYc1KNw==",
+ "dependencies": {
+ "@radix-ui/react-compose-refs": "1.1.0"
+ },
+ "peerDependencies": {
+ "@types/react": "*",
+ "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@radix-ui/react-tabs": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-tabs/-/react-tabs-1.1.1.tgz",
+ "integrity": "sha512-3GBUDmP2DvzmtYLMsHmpA1GtR46ZDZ+OreXM/N+kkQJOPIgytFWWTfDQmBQKBvaFS0Vno0FktdbVzN28KGrMdw==",
+ "dependencies": {
+ "@radix-ui/primitive": "1.1.0",
+ "@radix-ui/react-context": "1.1.1",
+ "@radix-ui/react-direction": "1.1.0",
+ "@radix-ui/react-id": "1.1.0",
+ "@radix-ui/react-presence": "1.1.1",
+ "@radix-ui/react-primitive": "2.0.0",
+ "@radix-ui/react-roving-focus": "1.1.0",
+ "@radix-ui/react-use-controllable-state": "1.1.0"
+ },
+ "peerDependencies": {
+ "@types/react": "*",
+ "@types/react-dom": "*",
+ "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
+ "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ },
+ "@types/react-dom": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@radix-ui/react-toast": {
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-toast/-/react-toast-1.2.2.tgz",
+ "integrity": "sha512-Z6pqSzmAP/bFJoqMAston4eSNa+ud44NSZTiZUmUen+IOZ5nBY8kzuU5WDBVyFXPtcW6yUalOHsxM/BP6Sv8ww==",
+ "dependencies": {
+ "@radix-ui/primitive": "1.1.0",
+ "@radix-ui/react-collection": "1.1.0",
+ "@radix-ui/react-compose-refs": "1.1.0",
+ "@radix-ui/react-context": "1.1.1",
+ "@radix-ui/react-dismissable-layer": "1.1.1",
+ "@radix-ui/react-portal": "1.1.2",
+ "@radix-ui/react-presence": "1.1.1",
+ "@radix-ui/react-primitive": "2.0.0",
+ "@radix-ui/react-use-callback-ref": "1.1.0",
+ "@radix-ui/react-use-controllable-state": "1.1.0",
+ "@radix-ui/react-use-layout-effect": "1.1.0",
+ "@radix-ui/react-visually-hidden": "1.1.0"
+ },
+ "peerDependencies": {
+ "@types/react": "*",
+ "@types/react-dom": "*",
+ "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
+ "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ },
+ "@types/react-dom": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@radix-ui/react-tooltip": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-tooltip/-/react-tooltip-1.1.3.tgz",
+ "integrity": "sha512-Z4w1FIS0BqVFI2c1jZvb/uDVJijJjJ2ZMuPV81oVgTZ7g3BZxobplnMVvXtFWgtozdvYJ+MFWtwkM5S2HnAong==",
+ "dependencies": {
+ "@radix-ui/primitive": "1.1.0",
+ "@radix-ui/react-compose-refs": "1.1.0",
+ "@radix-ui/react-context": "1.1.1",
+ "@radix-ui/react-dismissable-layer": "1.1.1",
+ "@radix-ui/react-id": "1.1.0",
+ "@radix-ui/react-popper": "1.2.0",
+ "@radix-ui/react-portal": "1.1.2",
+ "@radix-ui/react-presence": "1.1.1",
+ "@radix-ui/react-primitive": "2.0.0",
+ "@radix-ui/react-slot": "1.1.0",
+ "@radix-ui/react-use-controllable-state": "1.1.0",
+ "@radix-ui/react-visually-hidden": "1.1.0"
+ },
+ "peerDependencies": {
+ "@types/react": "*",
+ "@types/react-dom": "*",
+ "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
+ "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ },
+ "@types/react-dom": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@radix-ui/react-use-callback-ref": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.1.0.tgz",
+ "integrity": "sha512-CasTfvsy+frcFkbXtSJ2Zu9JHpN8TYKxkgJGWbjiZhFivxaeW7rMeZt7QELGVLaYVfFMsKHjb7Ak0nMEe+2Vfw==",
+ "peerDependencies": {
+ "@types/react": "*",
+ "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@radix-ui/react-use-controllable-state": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-use-controllable-state/-/react-use-controllable-state-1.1.0.tgz",
+ "integrity": "sha512-MtfMVJiSr2NjzS0Aa90NPTnvTSg6C/JLCV7ma0W6+OMV78vd8OyRpID+Ng9LxzsPbLeuBnWBA1Nq30AtBIDChw==",
+ "dependencies": {
+ "@radix-ui/react-use-callback-ref": "1.1.0"
+ },
+ "peerDependencies": {
+ "@types/react": "*",
+ "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@radix-ui/react-use-escape-keydown": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-use-escape-keydown/-/react-use-escape-keydown-1.1.0.tgz",
+ "integrity": "sha512-L7vwWlR1kTTQ3oh7g1O0CBF3YCyyTj8NmhLR+phShpyA50HCfBFKVJTpshm9PzLiKmehsrQzTYTpX9HvmC9rhw==",
+ "dependencies": {
+ "@radix-ui/react-use-callback-ref": "1.1.0"
+ },
+ "peerDependencies": {
+ "@types/react": "*",
+ "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@radix-ui/react-use-layout-effect": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-use-layout-effect/-/react-use-layout-effect-1.1.0.tgz",
+ "integrity": "sha512-+FPE0rOdziWSrH9athwI1R0HDVbWlEhd+FR+aSDk4uWGmSJ9Z54sdZVDQPZAinJhJXwfT+qnj969mCsT2gfm5w==",
+ "peerDependencies": {
+ "@types/react": "*",
+ "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@radix-ui/react-use-previous": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-use-previous/-/react-use-previous-1.1.0.tgz",
+ "integrity": "sha512-Z/e78qg2YFnnXcW88A4JmTtm4ADckLno6F7OXotmkQfeuCVaKuYzqAATPhVzl3delXE7CxIV8shofPn3jPc5Og==",
+ "peerDependencies": {
+ "@types/react": "*",
+ "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@radix-ui/react-use-rect": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-use-rect/-/react-use-rect-1.1.0.tgz",
+ "integrity": "sha512-0Fmkebhr6PiseyZlYAOtLS+nb7jLmpqTrJyv61Pe68MKYW6OWdRE2kI70TaYY27u7H0lajqM3hSMMLFq18Z7nQ==",
+ "dependencies": {
+ "@radix-ui/rect": "1.1.0"
+ },
+ "peerDependencies": {
+ "@types/react": "*",
+ "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@radix-ui/react-use-size": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-use-size/-/react-use-size-1.1.0.tgz",
+ "integrity": "sha512-XW3/vWuIXHa+2Uwcc2ABSfcCledmXhhQPlGbfcRXbiUQI5Icjcg19BGCZVKKInYbvUCut/ufbbLLPFC5cbb1hw==",
+ "dependencies": {
+ "@radix-ui/react-use-layout-effect": "1.1.0"
+ },
+ "peerDependencies": {
+ "@types/react": "*",
+ "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@radix-ui/react-visually-hidden": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/@radix-ui/react-visually-hidden/-/react-visually-hidden-1.1.0.tgz",
+ "integrity": "sha512-N8MDZqtgCgG5S3aV60INAB475osJousYpZ4cTJ2cFbMpdHS5Y6loLTH8LPtkj2QN0x93J30HT/M3qJXM0+lyeQ==",
+ "dependencies": {
+ "@radix-ui/react-primitive": "2.0.0"
+ },
+ "peerDependencies": {
+ "@types/react": "*",
+ "@types/react-dom": "*",
+ "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc",
+ "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ },
+ "@types/react-dom": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@radix-ui/rect": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/@radix-ui/rect/-/rect-1.1.0.tgz",
+ "integrity": "sha512-A9+lCBZoaMJlVKcRBz2YByCG+Cp2t6nAnMnNba+XiWxnj6r4JUFqfsgwocMBZU9LPtdxC6wB56ySYpc7LQIoJg=="
+ },
+ "node_modules/@rollup/pluginutils": {
+ "version": "5.1.3",
+ "resolved": "https://registry.npmjs.org/@rollup/pluginutils/-/pluginutils-5.1.3.tgz",
+ "integrity": "sha512-Pnsb6f32CD2W3uCaLZIzDmeFyQ2b8UWMFI7xtwUezpcGBDVDW6y9XgAWIlARiGAo6eNF5FK5aQTr0LFyNyqq5A==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree": "^1.0.0",
+ "estree-walker": "^2.0.2",
+ "picomatch": "^4.0.2"
+ },
+ "engines": {
+ "node": ">=14.0.0"
+ },
+ "peerDependencies": {
+ "rollup": "^1.20.0||^2.0.0||^3.0.0||^4.0.0"
+ },
+ "peerDependenciesMeta": {
+ "rollup": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@rollup/pluginutils/node_modules/estree-walker": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-2.0.2.tgz",
+ "integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@rollup/pluginutils/node_modules/picomatch": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz",
+ "integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/jonschlinkert"
+ }
+ },
+ "node_modules/@rollup/rollup-android-arm-eabi": {
+ "version": "4.24.3",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.24.3.tgz",
+ "integrity": "sha512-ufb2CH2KfBWPJok95frEZZ82LtDl0A6QKTa8MoM+cWwDZvVGl5/jNb79pIhRvAalUu+7LD91VYR0nwRD799HkQ==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "android"
+ ]
+ },
+ "node_modules/@rollup/rollup-android-arm64": {
+ "version": "4.24.3",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.24.3.tgz",
+ "integrity": "sha512-iAHpft/eQk9vkWIV5t22V77d90CRofgR2006UiCjHcHJFVI1E0oBkQIAbz+pLtthFw3hWEmVB4ilxGyBf48i2Q==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "android"
+ ]
+ },
+ "node_modules/@rollup/rollup-darwin-arm64": {
+ "version": "4.24.3",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.24.3.tgz",
+ "integrity": "sha512-QPW2YmkWLlvqmOa2OwrfqLJqkHm7kJCIMq9kOz40Zo9Ipi40kf9ONG5Sz76zszrmIZZ4hgRIkez69YnTHgEz1w==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "darwin"
+ ]
+ },
+ "node_modules/@rollup/rollup-darwin-x64": {
+ "version": "4.24.3",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.24.3.tgz",
+ "integrity": "sha512-KO0pN5x3+uZm1ZXeIfDqwcvnQ9UEGN8JX5ufhmgH5Lz4ujjZMAnxQygZAVGemFWn+ZZC0FQopruV4lqmGMshow==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "darwin"
+ ]
+ },
+ "node_modules/@rollup/rollup-freebsd-arm64": {
+ "version": "4.24.3",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.24.3.tgz",
+ "integrity": "sha512-CsC+ZdIiZCZbBI+aRlWpYJMSWvVssPuWqrDy/zi9YfnatKKSLFCe6fjna1grHuo/nVaHG+kiglpRhyBQYRTK4A==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "freebsd"
+ ]
+ },
+ "node_modules/@rollup/rollup-freebsd-x64": {
+ "version": "4.24.3",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.24.3.tgz",
+ "integrity": "sha512-F0nqiLThcfKvRQhZEzMIXOQG4EeX61im61VYL1jo4eBxv4aZRmpin6crnBJQ/nWnCsjH5F6J3W6Stdm0mBNqBg==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "freebsd"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-arm-gnueabihf": {
+ "version": "4.24.3",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.24.3.tgz",
+ "integrity": "sha512-KRSFHyE/RdxQ1CSeOIBVIAxStFC/hnBgVcaiCkQaVC+EYDtTe4X7z5tBkFyRoBgUGtB6Xg6t9t2kulnX6wJc6A==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-arm-musleabihf": {
+ "version": "4.24.3",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.24.3.tgz",
+ "integrity": "sha512-h6Q8MT+e05zP5BxEKz0vi0DhthLdrNEnspdLzkoFqGwnmOzakEHSlXfVyA4HJ322QtFy7biUAVFPvIDEDQa6rw==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-arm64-gnu": {
+ "version": "4.24.3",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.24.3.tgz",
+ "integrity": "sha512-fKElSyXhXIJ9pqiYRqisfirIo2Z5pTTve5K438URf08fsypXrEkVmShkSfM8GJ1aUyvjakT+fn2W7Czlpd/0FQ==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-arm64-musl": {
+ "version": "4.24.3",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.24.3.tgz",
+ "integrity": "sha512-YlddZSUk8G0px9/+V9PVilVDC6ydMz7WquxozToozSnfFK6wa6ne1ATUjUvjin09jp34p84milxlY5ikueoenw==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-powerpc64le-gnu": {
+ "version": "4.24.3",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-powerpc64le-gnu/-/rollup-linux-powerpc64le-gnu-4.24.3.tgz",
+ "integrity": "sha512-yNaWw+GAO8JjVx3s3cMeG5Esz1cKVzz8PkTJSfYzE5u7A+NvGmbVFEHP+BikTIyYWuz0+DX9kaA3pH9Sqxp69g==",
+ "cpu": [
+ "ppc64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-riscv64-gnu": {
+ "version": "4.24.3",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.24.3.tgz",
+ "integrity": "sha512-lWKNQfsbpv14ZCtM/HkjCTm4oWTKTfxPmr7iPfp3AHSqyoTz5AgLemYkWLwOBWc+XxBbrU9SCokZP0WlBZM9lA==",
+ "cpu": [
+ "riscv64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-s390x-gnu": {
+ "version": "4.24.3",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.24.3.tgz",
+ "integrity": "sha512-HoojGXTC2CgCcq0Woc/dn12wQUlkNyfH0I1ABK4Ni9YXyFQa86Fkt2Q0nqgLfbhkyfQ6003i3qQk9pLh/SpAYw==",
+ "cpu": [
+ "s390x"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-x64-gnu": {
+ "version": "4.24.3",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.24.3.tgz",
+ "integrity": "sha512-mnEOh4iE4USSccBOtcrjF5nj+5/zm6NcNhbSEfR3Ot0pxBwvEn5QVUXcuOwwPkapDtGZ6pT02xLoPaNv06w7KQ==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-x64-musl": {
+ "version": "4.24.3",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.24.3.tgz",
+ "integrity": "sha512-rMTzawBPimBQkG9NKpNHvquIUTQPzrnPxPbCY1Xt+mFkW7pshvyIS5kYgcf74goxXOQk0CP3EoOC1zcEezKXhw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-win32-arm64-msvc": {
+ "version": "4.24.3",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.24.3.tgz",
+ "integrity": "sha512-2lg1CE305xNvnH3SyiKwPVsTVLCg4TmNCF1z7PSHX2uZY2VbUpdkgAllVoISD7JO7zu+YynpWNSKAtOrX3AiuA==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "win32"
+ ]
+ },
+ "node_modules/@rollup/rollup-win32-ia32-msvc": {
+ "version": "4.24.3",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.24.3.tgz",
+ "integrity": "sha512-9SjYp1sPyxJsPWuhOCX6F4jUMXGbVVd5obVpoVEi8ClZqo52ViZewA6eFz85y8ezuOA+uJMP5A5zo6Oz4S5rVQ==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "win32"
+ ]
+ },
+ "node_modules/@rollup/rollup-win32-x64-msvc": {
+ "version": "4.24.3",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.24.3.tgz",
+ "integrity": "sha512-HGZgRFFYrMrP3TJlq58nR1xy8zHKId25vhmm5S9jETEfDf6xybPxsavFTJaufe2zgOGYJBskGlj49CwtEuFhWQ==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "win32"
+ ]
+ },
+ "node_modules/@storybook/addon-actions": {
+ "version": "8.4.2",
+ "resolved": "https://registry.npmjs.org/@storybook/addon-actions/-/addon-actions-8.4.2.tgz",
+ "integrity": "sha512-+hA200XN5aeA4T3jq8IifQq6Y+9FyNQ0Q+blM1L0Tl7WLzBc7B1kHQnKvhSj5pvMSBWc/Q/kY7Ev5t9gdOu13g==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@storybook/global": "^5.0.0",
+ "@types/uuid": "^9.0.1",
+ "dequal": "^2.0.2",
+ "polished": "^4.2.2",
+ "uuid": "^9.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/storybook"
+ },
+ "peerDependencies": {
+ "storybook": "^8.4.2"
+ }
+ },
+ "node_modules/@storybook/addon-backgrounds": {
+ "version": "8.4.2",
+ "resolved": "https://registry.npmjs.org/@storybook/addon-backgrounds/-/addon-backgrounds-8.4.2.tgz",
+ "integrity": "sha512-s4uag5VKuk8q2MSnuNS7Sv+v1/mykzGPXe/zZRW2ammtkdHp8Uy78eQS2G0aiG02chXCX+qQgWMyy5QItDcTFQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@storybook/global": "^5.0.0",
+ "memoizerific": "^1.11.3",
+ "ts-dedent": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/storybook"
+ },
+ "peerDependencies": {
+ "storybook": "^8.4.2"
+ }
+ },
+ "node_modules/@storybook/addon-controls": {
+ "version": "8.4.2",
+ "resolved": "https://registry.npmjs.org/@storybook/addon-controls/-/addon-controls-8.4.2.tgz",
+ "integrity": "sha512-raCbHEj1xl4F3wKH6IdfEXNRaxKpY4QGhjSTE8Pte5iJSVhKG86taLqqRr+4dC7H1/LVMPU1XCGV4mkgDGtyxQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@storybook/global": "^5.0.0",
+ "dequal": "^2.0.2",
+ "ts-dedent": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/storybook"
+ },
+ "peerDependencies": {
+ "storybook": "^8.4.2"
+ }
+ },
+ "node_modules/@storybook/addon-docs": {
+ "version": "8.4.2",
+ "resolved": "https://registry.npmjs.org/@storybook/addon-docs/-/addon-docs-8.4.2.tgz",
+ "integrity": "sha512-jIpykha7hv2Inlrq31ZoYg2QhuCuvcO+Q+uvhT45RDTB+2US/fg3rJINKlw2Djq8RPPOXvty5W0yvE6CrWKhnQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@mdx-js/react": "^3.0.0",
+ "@storybook/blocks": "8.4.2",
+ "@storybook/csf-plugin": "8.4.2",
+ "@storybook/react-dom-shim": "8.4.2",
+ "react": "^16.8.0 || ^17.0.0 || ^18.0.0",
+ "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0",
+ "ts-dedent": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/storybook"
+ },
+ "peerDependencies": {
+ "storybook": "^8.4.2"
+ }
+ },
+ "node_modules/@storybook/addon-essentials": {
+ "version": "8.4.2",
+ "resolved": "https://registry.npmjs.org/@storybook/addon-essentials/-/addon-essentials-8.4.2.tgz",
+ "integrity": "sha512-+/vfPrXM/GWU3Kbrg92PepwAZr7lOeulTTYF4THK0CL3DfUUlkGNpBPLP5PtjCuIkVrTCjXiIEdVWk47d5m2+w==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@storybook/addon-actions": "8.4.2",
+ "@storybook/addon-backgrounds": "8.4.2",
+ "@storybook/addon-controls": "8.4.2",
+ "@storybook/addon-docs": "8.4.2",
+ "@storybook/addon-highlight": "8.4.2",
+ "@storybook/addon-measure": "8.4.2",
+ "@storybook/addon-outline": "8.4.2",
+ "@storybook/addon-toolbars": "8.4.2",
+ "@storybook/addon-viewport": "8.4.2",
+ "ts-dedent": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/storybook"
+ },
+ "peerDependencies": {
+ "storybook": "^8.4.2"
+ }
+ },
+ "node_modules/@storybook/addon-highlight": {
+ "version": "8.4.2",
+ "resolved": "https://registry.npmjs.org/@storybook/addon-highlight/-/addon-highlight-8.4.2.tgz",
+ "integrity": "sha512-vTtwp7nyJ09SXrsMnH+pukCjHjRMjQXgHZHxvbrv09uoH8ldQMv9B7u+X+9Wcy/jYSKFz/ng7pWo4b4a2oXHkg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@storybook/global": "^5.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/storybook"
+ },
+ "peerDependencies": {
+ "storybook": "^8.4.2"
+ }
+ },
+ "node_modules/@storybook/addon-interactions": {
+ "version": "8.4.2",
+ "resolved": "https://registry.npmjs.org/@storybook/addon-interactions/-/addon-interactions-8.4.2.tgz",
+ "integrity": "sha512-+/NTENTApeOcONgFNQ6Olbk0GH3pTDG3w0eh00slCB+2agD1BcVKg8SSlHQV0lQF1cK3vWL/X3jeaxdFLYOjjg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@storybook/global": "^5.0.0",
+ "@storybook/instrumenter": "8.4.2",
+ "@storybook/test": "8.4.2",
+ "polished": "^4.2.2",
+ "ts-dedent": "^2.2.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/storybook"
+ },
+ "peerDependencies": {
+ "storybook": "^8.4.2"
+ }
+ },
+ "node_modules/@storybook/addon-measure": {
+ "version": "8.4.2",
+ "resolved": "https://registry.npmjs.org/@storybook/addon-measure/-/addon-measure-8.4.2.tgz",
+ "integrity": "sha512-z+j6xQwcUBSpgzl1XDU+xU4YYgLraLMljECW7NvRNyJ/PYixvol8R3wtzWbr+CBpxmvbXjEJCPlF+EjF9/mBWQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@storybook/global": "^5.0.0",
+ "tiny-invariant": "^1.3.1"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/storybook"
+ },
+ "peerDependencies": {
+ "storybook": "^8.4.2"
+ }
+ },
+ "node_modules/@storybook/addon-outline": {
+ "version": "8.4.2",
+ "resolved": "https://registry.npmjs.org/@storybook/addon-outline/-/addon-outline-8.4.2.tgz",
+ "integrity": "sha512-oTMlPEyT4CBqzcQbfemoJzJ6yzeRAmvrAx9ssaBcnQQRsKxo0D2Ri/Jmm6SNcR0yBHxYRkvIH+2phLw8aiflCQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@storybook/global": "^5.0.0",
+ "ts-dedent": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/storybook"
+ },
+ "peerDependencies": {
+ "storybook": "^8.4.2"
+ }
+ },
+ "node_modules/@storybook/addon-toolbars": {
+ "version": "8.4.2",
+ "resolved": "https://registry.npmjs.org/@storybook/addon-toolbars/-/addon-toolbars-8.4.2.tgz",
+ "integrity": "sha512-DidzW/NQS224niMJIjcJI2ls83emqygUcS9GYNGgdc5Xwro/TPgGYOXP2qnXgYUxXQTHbrxmIbHdEehxC7CcYQ==",
+ "dev": true,
+ "license": "MIT",
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/storybook"
+ },
+ "peerDependencies": {
+ "storybook": "^8.4.2"
+ }
+ },
+ "node_modules/@storybook/addon-viewport": {
+ "version": "8.4.2",
+ "resolved": "https://registry.npmjs.org/@storybook/addon-viewport/-/addon-viewport-8.4.2.tgz",
+ "integrity": "sha512-qVQ2UaxCNsUSFHnAAAizNPIJ/QwfMg7p5bBdpYROTZXJe+bxVp0rFzZmQgHZ3/sn+lzE4ItM4QEfxkfQUWi1ag==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "memoizerific": "^1.11.3"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/storybook"
+ },
+ "peerDependencies": {
+ "storybook": "^8.4.2"
+ }
+ },
+ "node_modules/@storybook/blocks": {
+ "version": "8.4.2",
+ "resolved": "https://registry.npmjs.org/@storybook/blocks/-/blocks-8.4.2.tgz",
+ "integrity": "sha512-yAAvmOWaD8gIrepOxCh/RxQqd/1xZIwd/V+gsvAhW/thawN+SpI+zK63gmcqAPLX84hJ3Dh5pegRk0SoHNuDVA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@storybook/csf": "^0.1.11",
+ "@storybook/icons": "^1.2.12",
+ "ts-dedent": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/storybook"
+ },
+ "peerDependencies": {
+ "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0-beta",
+ "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0-beta",
+ "storybook": "^8.4.2"
+ },
+ "peerDependenciesMeta": {
+ "react": {
+ "optional": true
+ },
+ "react-dom": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@storybook/builder-vite": {
+ "version": "8.4.2",
+ "resolved": "https://registry.npmjs.org/@storybook/builder-vite/-/builder-vite-8.4.2.tgz",
+ "integrity": "sha512-dO5FB5yH1C6tr/kBHn1frvGwp8Pt0D1apgXWkJ5ITWEUfh6WwOqX2fqsWsqaNwE7gP0qn0XgwCIEkI/4Mj55SA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@storybook/csf-plugin": "8.4.2",
+ "browser-assert": "^1.2.1",
+ "ts-dedent": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/storybook"
+ },
+ "peerDependencies": {
+ "storybook": "^8.4.2",
+ "vite": "^4.0.0 || ^5.0.0"
+ }
+ },
+ "node_modules/@storybook/components": {
+ "version": "8.4.2",
+ "resolved": "https://registry.npmjs.org/@storybook/components/-/components-8.4.2.tgz",
+ "integrity": "sha512-+W59oF7D73LAxLNmCfFrfs98cH9pyNHK9HlJoO5/lKbK4IdWhhOoqUR/AJ3ueksoLuetFat4DxyE8SN1H4Bvrg==",
+ "dev": true,
+ "license": "MIT",
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/storybook"
+ },
+ "peerDependencies": {
+ "storybook": "^8.2.0 || ^8.3.0-0 || ^8.4.0-0 || ^8.5.0-0 || ^8.6.0-0"
+ }
+ },
+ "node_modules/@storybook/core": {
+ "version": "8.4.2",
+ "resolved": "https://registry.npmjs.org/@storybook/core/-/core-8.4.2.tgz",
+ "integrity": "sha512-hF8GWoUZTjwwuV5j4OLhMHZtZQL/NYcVUBReC2Ba06c8PkFIKqKZwATr1zKd301gQ5Qwcn9WgmZxJTMgdKQtOg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@storybook/csf": "^0.1.11",
+ "better-opn": "^3.0.2",
+ "browser-assert": "^1.2.1",
+ "esbuild": "^0.18.0 || ^0.19.0 || ^0.20.0 || ^0.21.0 || ^0.22.0 || ^0.23.0 || ^0.24.0",
+ "esbuild-register": "^3.5.0",
+ "jsdoc-type-pratt-parser": "^4.0.0",
+ "process": "^0.11.10",
+ "recast": "^0.23.5",
+ "semver": "^7.6.2",
+ "util": "^0.12.5",
+ "ws": "^8.2.3"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/storybook"
+ },
+ "peerDependencies": {
+ "prettier": "^2 || ^3"
+ },
+ "peerDependenciesMeta": {
+ "prettier": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@storybook/core/node_modules/semver": {
+ "version": "7.6.3",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.3.tgz",
+ "integrity": "sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==",
+ "dev": true,
+ "license": "ISC",
+ "bin": {
+ "semver": "bin/semver.js"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/@storybook/csf": {
+ "version": "0.1.11",
+ "resolved": "https://registry.npmjs.org/@storybook/csf/-/csf-0.1.11.tgz",
+ "integrity": "sha512-dHYFQH3mA+EtnCkHXzicbLgsvzYjcDJ1JWsogbItZogkPHgSJM/Wr71uMkcvw8v9mmCyP4NpXJuu6bPoVsOnzg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "type-fest": "^2.19.0"
+ }
+ },
+ "node_modules/@storybook/csf-plugin": {
+ "version": "8.4.2",
+ "resolved": "https://registry.npmjs.org/@storybook/csf-plugin/-/csf-plugin-8.4.2.tgz",
+ "integrity": "sha512-1f0t6W5xbC1sSAHHs3uXYPIQs2NXAEtIGqn6X9i3xbbub6hDS8PF8BIm7dOjQ8dZOPp7d9ltR64V5CoLlsOigA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "unplugin": "^1.3.1"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/storybook"
+ },
+ "peerDependencies": {
+ "storybook": "^8.4.2"
+ }
+ },
+ "node_modules/@storybook/csf/node_modules/type-fest": {
+ "version": "2.19.0",
+ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-2.19.0.tgz",
+ "integrity": "sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==",
+ "dev": true,
+ "license": "(MIT OR CC0-1.0)",
+ "engines": {
+ "node": ">=12.20"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/@storybook/global": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/@storybook/global/-/global-5.0.0.tgz",
+ "integrity": "sha512-FcOqPAXACP0I3oJ/ws6/rrPT9WGhu915Cg8D02a9YxLo0DE9zI+a9A5gRGvmQ09fiWPukqI8ZAEoQEdWUKMQdQ==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@storybook/icons": {
+ "version": "1.2.12",
+ "resolved": "https://registry.npmjs.org/@storybook/icons/-/icons-1.2.12.tgz",
+ "integrity": "sha512-UxgyK5W3/UV4VrI3dl6ajGfHM4aOqMAkFLWe2KibeQudLf6NJpDrDMSHwZj+3iKC4jFU7dkKbbtH2h/al4sW3Q==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=14.0.0"
+ },
+ "peerDependencies": {
+ "react": "^16.8.0 || ^17.0.0 || ^18.0.0",
+ "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0"
+ }
+ },
+ "node_modules/@storybook/instrumenter": {
+ "version": "8.4.2",
+ "resolved": "https://registry.npmjs.org/@storybook/instrumenter/-/instrumenter-8.4.2.tgz",
+ "integrity": "sha512-gPYCZ/0O6gRLI3zmenu2N6QtKzxDZFdT2xf4RWcNUSZyp28RZkRCIgKFMt3fTmvE0yMzAjQyRSkBdrONjQ44HA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@storybook/global": "^5.0.0",
+ "@vitest/utils": "^2.1.1"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/storybook"
+ },
+ "peerDependencies": {
+ "storybook": "^8.4.2"
+ }
+ },
+ "node_modules/@storybook/manager-api": {
+ "version": "8.4.2",
+ "resolved": "https://registry.npmjs.org/@storybook/manager-api/-/manager-api-8.4.2.tgz",
+ "integrity": "sha512-rhPc4cgQDKDH8NUyRh/ZaJW7QIhR/PO5MNX4xc+vz71sM2nO7ONA/FrgLtCuu4SULdwilEPvGefYvLK0dE+Caw==",
+ "dev": true,
+ "license": "MIT",
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/storybook"
+ },
+ "peerDependencies": {
+ "storybook": "^8.2.0 || ^8.3.0-0 || ^8.4.0-0 || ^8.5.0-0 || ^8.6.0-0"
+ }
+ },
+ "node_modules/@storybook/preview-api": {
+ "version": "8.4.2",
+ "resolved": "https://registry.npmjs.org/@storybook/preview-api/-/preview-api-8.4.2.tgz",
+ "integrity": "sha512-5X/xvIvDPaWJKUBCo5zVeBbbjkhnwcI2KPkuOgrHVRRhuQ5WqD0RYxVtOOFNyQXme7g0nNl5RFNgvT7qv9qGeg==",
+ "dev": true,
+ "license": "MIT",
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/storybook"
+ },
+ "peerDependencies": {
+ "storybook": "^8.2.0 || ^8.3.0-0 || ^8.4.0-0 || ^8.5.0-0 || ^8.6.0-0"
+ }
+ },
+ "node_modules/@storybook/react": {
+ "version": "8.4.2",
+ "resolved": "https://registry.npmjs.org/@storybook/react/-/react-8.4.2.tgz",
+ "integrity": "sha512-rO5/aVKBVhIKENcL7G8ud4QKC5OyWBPCkJIvY6XUHIuhErJy9/4pP+sZ85jypVwx5kq+EqCPF8AEOWjIxB/4/Q==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@storybook/components": "8.4.2",
+ "@storybook/global": "^5.0.0",
+ "@storybook/manager-api": "8.4.2",
+ "@storybook/preview-api": "8.4.2",
+ "@storybook/react-dom-shim": "8.4.2",
+ "@storybook/theming": "8.4.2"
+ },
+ "engines": {
+ "node": ">=18.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/storybook"
+ },
+ "peerDependencies": {
+ "@storybook/test": "8.4.2",
+ "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0-beta",
+ "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0-beta",
+ "storybook": "^8.4.2",
+ "typescript": ">= 4.2.x"
+ },
+ "peerDependenciesMeta": {
+ "@storybook/test": {
+ "optional": true
+ },
+ "typescript": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@storybook/react-dom-shim": {
+ "version": "8.4.2",
+ "resolved": "https://registry.npmjs.org/@storybook/react-dom-shim/-/react-dom-shim-8.4.2.tgz",
+ "integrity": "sha512-FZVTM1f34FpGnf6e3MDIKkz05gmn8H9wEccvQAgr8pEFe8VWfrpVWeUrmatSAfgrCMNXYC1avDend8UX6IM8Fg==",
+ "dev": true,
+ "license": "MIT",
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/storybook"
+ },
+ "peerDependencies": {
+ "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0-beta",
+ "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0-beta",
+ "storybook": "^8.4.2"
+ }
+ },
+ "node_modules/@storybook/react-vite": {
+ "version": "8.4.2",
+ "resolved": "https://registry.npmjs.org/@storybook/react-vite/-/react-vite-8.4.2.tgz",
+ "integrity": "sha512-OoXaW/V1AqLggMyniRcnuwmqQ1/OtSn38t31lePX4nDDeJhbGT3ZPldRrwvsLb0EaD3N27uoL+QbAOgsYJIhwA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@joshwooding/vite-plugin-react-docgen-typescript": "0.3.0",
+ "@rollup/pluginutils": "^5.0.2",
+ "@storybook/builder-vite": "8.4.2",
+ "@storybook/react": "8.4.2",
+ "find-up": "^5.0.0",
+ "magic-string": "^0.30.0",
+ "react-docgen": "^7.0.0",
+ "resolve": "^1.22.8",
+ "tsconfig-paths": "^4.2.0"
+ },
+ "engines": {
+ "node": ">=18.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/storybook"
+ },
+ "peerDependencies": {
+ "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0-beta",
+ "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0-beta",
+ "storybook": "^8.4.2",
+ "vite": "^4.0.0 || ^5.0.0"
+ }
+ },
+ "node_modules/@storybook/react-vite/node_modules/resolve": {
+ "version": "1.22.8",
+ "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.8.tgz",
+ "integrity": "sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "is-core-module": "^2.13.0",
+ "path-parse": "^1.0.7",
+ "supports-preserve-symlinks-flag": "^1.0.0"
+ },
+ "bin": {
+ "resolve": "bin/resolve"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/@storybook/test": {
+ "version": "8.4.2",
+ "resolved": "https://registry.npmjs.org/@storybook/test/-/test-8.4.2.tgz",
+ "integrity": "sha512-MipTdboStv0hsqF2Sw8TZgP0YnxCcDYwxkTOd4hmRzev/7Brtvpi4pqjqh8k98ZCvhrCPAPVIoX5drk+oi3YUA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@storybook/csf": "^0.1.11",
+ "@storybook/global": "^5.0.0",
+ "@storybook/instrumenter": "8.4.2",
+ "@testing-library/dom": "10.4.0",
+ "@testing-library/jest-dom": "6.5.0",
+ "@testing-library/user-event": "14.5.2",
+ "@vitest/expect": "2.0.5",
+ "@vitest/spy": "2.0.5"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/storybook"
+ },
+ "peerDependencies": {
+ "storybook": "^8.4.2"
+ }
+ },
+ "node_modules/@storybook/test/node_modules/@testing-library/jest-dom": {
+ "version": "6.5.0",
+ "resolved": "https://registry.npmjs.org/@testing-library/jest-dom/-/jest-dom-6.5.0.tgz",
+ "integrity": "sha512-xGGHpBXYSHUUr6XsKBfs85TWlYKpTc37cSBBVrXcib2MkHLboWlkClhWF37JKlDb9KEq3dHs+f2xR7XJEWGBxA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@adobe/css-tools": "^4.4.0",
+ "aria-query": "^5.0.0",
+ "chalk": "^3.0.0",
+ "css.escape": "^1.5.1",
+ "dom-accessibility-api": "^0.6.3",
+ "lodash": "^4.17.21",
+ "redent": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=14",
+ "npm": ">=6",
+ "yarn": ">=1"
+ }
+ },
+ "node_modules/@storybook/test/node_modules/@vitest/expect": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-2.0.5.tgz",
+ "integrity": "sha512-yHZtwuP7JZivj65Gxoi8upUN2OzHTi3zVfjwdpu2WrvCZPLwsJ2Ey5ILIPccoW23dd/zQBlJ4/dhi7DWNyXCpA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@vitest/spy": "2.0.5",
+ "@vitest/utils": "2.0.5",
+ "chai": "^5.1.1",
+ "tinyrainbow": "^1.2.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/vitest"
+ }
+ },
+ "node_modules/@storybook/test/node_modules/@vitest/pretty-format": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-2.0.5.tgz",
+ "integrity": "sha512-h8k+1oWHfwTkyTkb9egzwNMfJAEx4veaPSnMeKbVSjp4euqGSbQlm5+6VHwTr7u4FJslVVsUG5nopCaAYdOmSQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "tinyrainbow": "^1.2.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/vitest"
+ }
+ },
+ "node_modules/@storybook/test/node_modules/@vitest/spy": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-2.0.5.tgz",
+ "integrity": "sha512-c/jdthAhvJdpfVuaexSrnawxZz6pywlTPe84LUB2m/4t3rl2fTo9NFGBG4oWgaD+FTgDDV8hJ/nibT7IfH3JfA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "tinyspy": "^3.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/vitest"
+ }
+ },
+ "node_modules/@storybook/test/node_modules/@vitest/utils": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-2.0.5.tgz",
+ "integrity": "sha512-d8HKbqIcya+GR67mkZbrzhS5kKhtp8dQLcmRZLGTscGVg7yImT82cIrhtn2L8+VujWcy6KZweApgNmPsTAO/UQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@vitest/pretty-format": "2.0.5",
+ "estree-walker": "^3.0.3",
+ "loupe": "^3.1.1",
+ "tinyrainbow": "^1.2.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/vitest"
+ }
+ },
+ "node_modules/@storybook/test/node_modules/chalk": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz",
+ "integrity": "sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "ansi-styles": "^4.1.0",
+ "supports-color": "^7.1.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/@storybook/test/node_modules/dom-accessibility-api": {
+ "version": "0.6.3",
+ "resolved": "https://registry.npmjs.org/dom-accessibility-api/-/dom-accessibility-api-0.6.3.tgz",
+ "integrity": "sha512-7ZgogeTnjuHbo+ct10G9Ffp0mif17idi0IyWNVA/wcwcm7NPOD/WEHVP3n7n3MhXqxoIYm8d6MuZohYWIZ4T3w==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@storybook/theming": {
+ "version": "8.4.2",
+ "resolved": "https://registry.npmjs.org/@storybook/theming/-/theming-8.4.2.tgz",
+ "integrity": "sha512-9j4fnu5LcV+qSs1rdwf61Bt14lms0T1LOZkHxGNcS1c1oH+cPS+sxECh2lxtni+mvOAHUlBs9pKhVZzRPdWpvg==",
+ "dev": true,
+ "license": "MIT",
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/storybook"
+ },
+ "peerDependencies": {
+ "storybook": "^8.2.0 || ^8.3.0-0 || ^8.4.0-0 || ^8.5.0-0 || ^8.6.0-0"
+ }
+ },
+ "node_modules/@swc/core": {
+ "version": "1.7.42",
+ "resolved": "https://registry.npmjs.org/@swc/core/-/core-1.7.42.tgz",
+ "integrity": "sha512-iQrRk3SKndQZ4ptJv1rzeQSiCYQIhMjiO97QXOlCcCoaazOLKPnLnXzU4Kv0FuBFyYfG2FE94BoR0XI2BN02qw==",
+ "dev": true,
+ "hasInstallScript": true,
+ "dependencies": {
+ "@swc/counter": "^0.1.3",
+ "@swc/types": "^0.1.13"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/swc"
+ },
+ "optionalDependencies": {
+ "@swc/core-darwin-arm64": "1.7.42",
+ "@swc/core-darwin-x64": "1.7.42",
+ "@swc/core-linux-arm-gnueabihf": "1.7.42",
+ "@swc/core-linux-arm64-gnu": "1.7.42",
+ "@swc/core-linux-arm64-musl": "1.7.42",
+ "@swc/core-linux-x64-gnu": "1.7.42",
+ "@swc/core-linux-x64-musl": "1.7.42",
+ "@swc/core-win32-arm64-msvc": "1.7.42",
+ "@swc/core-win32-ia32-msvc": "1.7.42",
+ "@swc/core-win32-x64-msvc": "1.7.42"
+ },
+ "peerDependencies": {
+ "@swc/helpers": "*"
+ },
+ "peerDependenciesMeta": {
+ "@swc/helpers": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@swc/core-darwin-arm64": {
+ "version": "1.7.42",
+ "resolved": "https://registry.npmjs.org/@swc/core-darwin-arm64/-/core-darwin-arm64-1.7.42.tgz",
+ "integrity": "sha512-fWhaCs2+8GDRIcjExVDEIfbptVrxDqG8oHkESnXgymmvqTWzWei5SOnPNMS8Q+MYsn/b++Y2bDxkcwmq35Bvxg==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/@swc/core-darwin-x64": {
+ "version": "1.7.42",
+ "resolved": "https://registry.npmjs.org/@swc/core-darwin-x64/-/core-darwin-x64-1.7.42.tgz",
+ "integrity": "sha512-ZaVHD2bijrlkCyD7NDzLmSK849Jgcx+6DdL4x1dScoz1slJ8GTvLtEu0JOUaaScQwA+cVlhmrmlmi9ssjbRLGQ==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/@swc/core-linux-arm-gnueabihf": {
+ "version": "1.7.42",
+ "resolved": "https://registry.npmjs.org/@swc/core-linux-arm-gnueabihf/-/core-linux-arm-gnueabihf-1.7.42.tgz",
+ "integrity": "sha512-iF0BJj7hVTbY/vmbvyzVTh/0W80+Q4fbOYschdUM3Bsud39TA+lSaPOefOHywkNH58EQ1z3EAxYcJOWNES7GFQ==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/@swc/core-linux-arm64-gnu": {
+ "version": "1.7.42",
+ "resolved": "https://registry.npmjs.org/@swc/core-linux-arm64-gnu/-/core-linux-arm64-gnu-1.7.42.tgz",
+ "integrity": "sha512-xGu8j+DOLYTLkVmsfZPJbNPW1EkiWgSucT0nOlz77bLxImukt/0+HVm2hOwHSKuArQ8C3cjahAMY3b/s4VH2ww==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/@swc/core-linux-arm64-musl": {
+ "version": "1.7.42",
+ "resolved": "https://registry.npmjs.org/@swc/core-linux-arm64-musl/-/core-linux-arm64-musl-1.7.42.tgz",
+ "integrity": "sha512-qtW3JNO7i1yHEko59xxz+jY38+tYmB96JGzj6XzygMbYJYZDYbrOpXQvKbMGNG3YeTDan7Fp2jD0dlKf7NgDPA==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/@swc/core-linux-x64-gnu": {
+ "version": "1.7.42",
+ "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-gnu/-/core-linux-x64-gnu-1.7.42.tgz",
+ "integrity": "sha512-F9WY1TN+hhhtiEzZjRQziNLt36M5YprMeOBHjsLVNqwgflzleSI7ulgnlQECS8c8zESaXj3ksGduAoJYtPC1cA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/@swc/core-linux-x64-musl": {
+ "version": "1.7.42",
+ "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-musl/-/core-linux-x64-musl-1.7.42.tgz",
+ "integrity": "sha512-7YMdOaYKLMQ8JGfnmRDwidpLFs/6ka+80zekeM0iCVO48yLrJR36G0QGXzMjKsXI0BPhq+mboZRRENK4JfQnEA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/@swc/core-win32-arm64-msvc": {
+ "version": "1.7.42",
+ "resolved": "https://registry.npmjs.org/@swc/core-win32-arm64-msvc/-/core-win32-arm64-msvc-1.7.42.tgz",
+ "integrity": "sha512-C5CYWaIZEyqPl5W/EwcJ/mLBJFHVoUEa/IwWi0b4q2fCXcSCktQGwKXOQ+d67GneiZoiq0HasgcdMmMpGS9YRQ==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/@swc/core-win32-ia32-msvc": {
+ "version": "1.7.42",
+ "resolved": "https://registry.npmjs.org/@swc/core-win32-ia32-msvc/-/core-win32-ia32-msvc-1.7.42.tgz",
+ "integrity": "sha512-3j47seZ5pO62mbrqvPe1iwhe2BXnM5q7iB+n2xgA38PCGYt0mnaJafqmpCXm/uYZOCMqSNynaoOWCMMZm4sqtA==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/@swc/core-win32-x64-msvc": {
+ "version": "1.7.42",
+ "resolved": "https://registry.npmjs.org/@swc/core-win32-x64-msvc/-/core-win32-x64-msvc-1.7.42.tgz",
+ "integrity": "sha512-FXl9MdeUogZLGDcLr6QIRdDVkpG0dkN4MLM4dwQ5kcAk+XfKPrQibX6M2kcfhsCx+jtBqtK7hRFReRXPWJZGbA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/@swc/counter": {
+ "version": "0.1.3",
+ "resolved": "https://registry.npmjs.org/@swc/counter/-/counter-0.1.3.tgz",
+ "integrity": "sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==",
+ "dev": true
+ },
+ "node_modules/@swc/types": {
+ "version": "0.1.13",
+ "resolved": "https://registry.npmjs.org/@swc/types/-/types-0.1.13.tgz",
+ "integrity": "sha512-JL7eeCk6zWCbiYQg2xQSdLXQJl8Qoc9rXmG2cEKvHe3CKwMHwHGpfOb8frzNLmbycOo6I51qxnLnn9ESf4I20Q==",
+ "dev": true,
+ "dependencies": {
+ "@swc/counter": "^0.1.3"
+ }
+ },
+ "node_modules/@tanstack/eslint-plugin-router": {
+ "version": "1.77.7",
+ "resolved": "https://registry.npmjs.org/@tanstack/eslint-plugin-router/-/eslint-plugin-router-1.77.7.tgz",
+ "integrity": "sha512-22lpivV3EA+S25dlZehXfl7pbuNkrbzOsS1FylZuXYCG7pq659N42LBovf5YlD8byYTwfAVjBFB01ylFjuG/Bw==",
+ "dev": true,
+ "dependencies": {
+ "@typescript-eslint/utils": "^8.12.1"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/tannerlinsley"
+ },
+ "peerDependencies": {
+ "eslint": "^8.57.0 || ^9.0.0"
+ }
+ },
+ "node_modules/@tanstack/history": {
+ "version": "1.61.1",
+ "resolved": "https://registry.npmjs.org/@tanstack/history/-/history-1.61.1.tgz",
+ "integrity": "sha512-2CqERleeqO3hkhJmyJm37tiL3LYgeOpmo8szqdjgtnnG0z7ZpvzkZz6HkfOr9Ca/ha7mhAiouSvLYuLkM37AMg==",
+ "peer": true,
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/tannerlinsley"
+ }
+ },
+ "node_modules/@tanstack/query-core": {
+ "version": "5.59.16",
+ "resolved": "https://registry.npmjs.org/@tanstack/query-core/-/query-core-5.59.16.tgz",
+ "integrity": "sha512-crHn+G3ltqb5JG0oUv6q+PMz1m1YkjpASrXTU+sYWW9pLk0t2GybUHNRqYPZWhxgjPaVGC4yp92gSFEJgYEsPw==",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/tannerlinsley"
+ }
+ },
+ "node_modules/@tanstack/react-query": {
+ "version": "5.59.16",
+ "resolved": "https://registry.npmjs.org/@tanstack/react-query/-/react-query-5.59.16.tgz",
+ "integrity": "sha512-MuyWheG47h6ERd4PKQ6V8gDyBu3ThNG22e1fRVwvq6ap3EqsFhyuxCAwhNP/03m/mLg+DAb0upgbPaX6VB+CkQ==",
+ "dependencies": {
+ "@tanstack/query-core": "5.59.16"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/tannerlinsley"
+ },
+ "peerDependencies": {
+ "react": "^18 || ^19"
+ }
+ },
+ "node_modules/@tanstack/react-router": {
+ "version": "1.78.0",
+ "resolved": "https://registry.npmjs.org/@tanstack/react-router/-/react-router-1.78.0.tgz",
+ "integrity": "sha512-yVIi6EJ3HzQxfdeWzfytSYBEELaHgvVL8xUoKWHeShcey4ChegGF0oSXhZ6dTiu8ODu0CkHKInTjgzjrlMO14Q==",
+ "peer": true,
+ "dependencies": {
+ "@tanstack/history": "1.61.1",
+ "@tanstack/react-store": "^0.5.6",
+ "tiny-invariant": "^1.3.3",
+ "tiny-warning": "^1.0.3"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/tannerlinsley"
+ },
+ "peerDependencies": {
+ "@tanstack/router-generator": "1.78.0",
+ "react": ">=18",
+ "react-dom": ">=18"
+ },
+ "peerDependenciesMeta": {
+ "@tanstack/router-generator": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@tanstack/react-store": {
+ "version": "0.5.6",
+ "resolved": "https://registry.npmjs.org/@tanstack/react-store/-/react-store-0.5.6.tgz",
+ "integrity": "sha512-SitIpS5jTj28DajjLpWbIX+YetmJL+6PRY0DKKiCGBKfYIqj3ryODQYF3jB3SNoR9ifUA/jFkqbJdBKFtWd+AQ==",
+ "peer": true,
+ "dependencies": {
+ "@tanstack/store": "0.5.5",
+ "use-sync-external-store": "^1.2.2"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/tannerlinsley"
+ },
+ "peerDependencies": {
+ "react": "^17.0.0 || ^18.0.0",
+ "react-dom": "^17.0.0 || ^18.0.0"
+ }
+ },
+ "node_modules/@tanstack/react-table": {
+ "version": "8.20.5",
+ "resolved": "https://registry.npmjs.org/@tanstack/react-table/-/react-table-8.20.5.tgz",
+ "integrity": "sha512-WEHopKw3znbUZ61s9i0+i9g8drmDo6asTWbrQh8Us63DAk/M0FkmIqERew6P71HI75ksZ2Pxyuf4vvKh9rAkiA==",
+ "dependencies": {
+ "@tanstack/table-core": "8.20.5"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/tannerlinsley"
+ },
+ "peerDependencies": {
+ "react": ">=16.8",
+ "react-dom": ">=16.8"
+ }
+ },
+ "node_modules/@tanstack/router-devtools": {
+ "version": "1.78.0",
+ "resolved": "https://registry.npmjs.org/@tanstack/router-devtools/-/router-devtools-1.78.0.tgz",
+ "integrity": "sha512-9NTBXKL3jqHEMPTyy/f4ibDxKL5aEyGMt+SxE4fJUeaG8cZeko8cZwcRuV+a2/5OBrvxzEBMa4enmKY8PvW19Q==",
+ "dev": true,
+ "dependencies": {
+ "clsx": "^2.1.1",
+ "goober": "^2.1.16"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/tannerlinsley"
+ },
+ "peerDependencies": {
+ "@tanstack/react-router": "^1.78.0",
+ "react": ">=18",
+ "react-dom": ">=18"
+ }
+ },
+ "node_modules/@tanstack/router-generator": {
+ "version": "1.78.0",
+ "resolved": "https://registry.npmjs.org/@tanstack/router-generator/-/router-generator-1.78.0.tgz",
+ "integrity": "sha512-hrP3M/oh+Lvli6oVH2bItXFHsWKHkL758efPKIyfmvp7Fk9Lm+eL0AVcxF8w9KzS+jtlzw0aPYVV7KH7Vu0URA==",
+ "devOptional": true,
+ "dependencies": {
+ "@tanstack/virtual-file-routes": "^1.64.0",
+ "prettier": "^3.3.3",
+ "tsx": "^4.19.2",
+ "zod": "^3.23.8"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/tannerlinsley"
+ }
+ },
+ "node_modules/@tanstack/router-plugin": {
+ "version": "1.78.0",
+ "resolved": "https://registry.npmjs.org/@tanstack/router-plugin/-/router-plugin-1.78.0.tgz",
+ "integrity": "sha512-ep3FYZzH/DYxZE/Pg4VyiYUqNfvoNXzmvw0hdHliTOxgsJdOs71LKcJokzeLdNijQOgud+CDurTWTknrRBOgeA==",
+ "dev": true,
+ "dependencies": {
+ "@babel/core": "^7.26.0",
+ "@babel/generator": "^7.26.0",
+ "@babel/parser": "^7.26.1",
+ "@babel/plugin-syntax-jsx": "^7.25.9",
+ "@babel/plugin-syntax-typescript": "^7.25.9",
+ "@babel/template": "^7.25.9",
+ "@babel/traverse": "^7.25.9",
+ "@babel/types": "^7.26.0",
+ "@tanstack/router-generator": "^1.78.0",
+ "@tanstack/virtual-file-routes": "^1.64.0",
+ "@types/babel__core": "^7.20.5",
+ "@types/babel__generator": "^7.6.8",
+ "@types/babel__template": "^7.4.4",
+ "@types/babel__traverse": "^7.20.6",
+ "babel-dead-code-elimination": "^1.0.6",
+ "chokidar": "^3.6.0",
+ "unplugin": "^1.12.2",
+ "zod": "^3.23.8"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/tannerlinsley"
+ },
+ "peerDependencies": {
+ "@rsbuild/core": ">=1.0.2",
+ "vite": ">=5.0.0",
+ "webpack": ">=5.92.0"
+ },
+ "peerDependenciesMeta": {
+ "@rsbuild/core": {
+ "optional": true
+ },
+ "vite": {
+ "optional": true
+ },
+ "webpack": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@tanstack/router-zod-adapter": {
+ "version": "1.78.0",
+ "resolved": "https://registry.npmjs.org/@tanstack/router-zod-adapter/-/router-zod-adapter-1.78.0.tgz",
+ "integrity": "sha512-AoRY57TJo3ZruHgqBGxTHxTREkLBOV/8LFOommUZ3VXys/zn+ovoJRU+El6YgF7XIeJl1gJQ2WFuOInkuHYOgg==",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/tannerlinsley"
+ },
+ "peerDependencies": {
+ "@tanstack/react-router": ">=1.43.2",
+ "zod": ">=3"
+ }
+ },
+ "node_modules/@tanstack/store": {
+ "version": "0.5.5",
+ "resolved": "https://registry.npmjs.org/@tanstack/store/-/store-0.5.5.tgz",
+ "integrity": "sha512-EOSrgdDAJExbvRZEQ/Xhh9iZchXpMN+ga1Bnk8Nmygzs8TfiE6hbzThF+Pr2G19uHL6+DTDTHhJ8VQiOd7l4tA==",
+ "peer": true,
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/tannerlinsley"
+ }
+ },
+ "node_modules/@tanstack/table-core": {
+ "version": "8.20.5",
+ "resolved": "https://registry.npmjs.org/@tanstack/table-core/-/table-core-8.20.5.tgz",
+ "integrity": "sha512-P9dF7XbibHph2PFRz8gfBKEXEY/HJPOhym8CHmjF8y3q5mWpKx9xtZapXQUWCgkqvsK0R46Azuz+VaxD4Xl+Tg==",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/tannerlinsley"
+ }
+ },
+ "node_modules/@tanstack/virtual-file-routes": {
+ "version": "1.64.0",
+ "resolved": "https://registry.npmjs.org/@tanstack/virtual-file-routes/-/virtual-file-routes-1.64.0.tgz",
+ "integrity": "sha512-soW+gE9QTmMaqXM17r7y1p8NiQVIIECjdTaYla8BKL5Flj030m3KuxEQoiG1XgjtA0O7ayznFz2YvPcXIy3qDg==",
+ "devOptional": true,
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/tannerlinsley"
+ }
+ },
+ "node_modules/@testing-library/dom": {
+ "version": "10.4.0",
+ "resolved": "https://registry.npmjs.org/@testing-library/dom/-/dom-10.4.0.tgz",
+ "integrity": "sha512-pemlzrSESWbdAloYml3bAJMEfNh1Z7EduzqPKprCH5S341frlpYnUEW0H72dLxa6IsYr+mPno20GiSm+h9dEdQ==",
+ "dev": true,
+ "dependencies": {
+ "@babel/code-frame": "^7.10.4",
+ "@babel/runtime": "^7.12.5",
+ "@types/aria-query": "^5.0.1",
+ "aria-query": "5.3.0",
+ "chalk": "^4.1.0",
+ "dom-accessibility-api": "^0.5.9",
+ "lz-string": "^1.5.0",
+ "pretty-format": "^27.0.2"
+ },
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@testing-library/jest-dom": {
+ "version": "6.6.3",
+ "resolved": "https://registry.npmjs.org/@testing-library/jest-dom/-/jest-dom-6.6.3.tgz",
+ "integrity": "sha512-IteBhl4XqYNkM54f4ejhLRJiZNqcSCoXUOG2CPK7qbD322KjQozM4kHQOfkG2oln9b9HTYqs+Sae8vBATubxxA==",
+ "dev": true,
+ "dependencies": {
+ "@adobe/css-tools": "^4.4.0",
+ "aria-query": "^5.0.0",
+ "chalk": "^3.0.0",
+ "css.escape": "^1.5.1",
+ "dom-accessibility-api": "^0.6.3",
+ "lodash": "^4.17.21",
+ "redent": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=14",
+ "npm": ">=6",
+ "yarn": ">=1"
+ }
+ },
+ "node_modules/@testing-library/jest-dom/node_modules/chalk": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz",
+ "integrity": "sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==",
+ "dev": true,
+ "dependencies": {
+ "ansi-styles": "^4.1.0",
+ "supports-color": "^7.1.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/@testing-library/jest-dom/node_modules/dom-accessibility-api": {
+ "version": "0.6.3",
+ "resolved": "https://registry.npmjs.org/dom-accessibility-api/-/dom-accessibility-api-0.6.3.tgz",
+ "integrity": "sha512-7ZgogeTnjuHbo+ct10G9Ffp0mif17idi0IyWNVA/wcwcm7NPOD/WEHVP3n7n3MhXqxoIYm8d6MuZohYWIZ4T3w==",
+ "dev": true
+ },
+ "node_modules/@testing-library/react": {
+ "version": "16.0.1",
+ "resolved": "https://registry.npmjs.org/@testing-library/react/-/react-16.0.1.tgz",
+ "integrity": "sha512-dSmwJVtJXmku+iocRhWOUFbrERC76TX2Mnf0ATODz8brzAZrMBbzLwQixlBSanZxR6LddK3eiwpSFZgDET1URg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/runtime": "^7.12.5"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "peerDependencies": {
+ "@testing-library/dom": "^10.0.0",
+ "@types/react": "^18.0.0",
+ "@types/react-dom": "^18.0.0",
+ "react": "^18.0.0",
+ "react-dom": "^18.0.0"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ },
+ "@types/react-dom": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@testing-library/user-event": {
+ "version": "14.5.2",
+ "resolved": "https://registry.npmjs.org/@testing-library/user-event/-/user-event-14.5.2.tgz",
+ "integrity": "sha512-YAh82Wh4TIrxYLmfGcixwD18oIjyC1pFQC2Y01F2lzV2HTMiYrI0nze0FD0ocB//CKS/7jIUgae+adPqxK5yCQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=12",
+ "npm": ">=6"
+ },
+ "peerDependencies": {
+ "@testing-library/dom": ">=7.21.4"
+ }
+ },
+ "node_modules/@types/aria-query": {
+ "version": "5.0.4",
+ "resolved": "https://registry.npmjs.org/@types/aria-query/-/aria-query-5.0.4.tgz",
+ "integrity": "sha512-rfT93uj5s0PRL7EzccGMs3brplhcrghnDoV26NqKhCAS1hVo+WdNsPvE/yb6ilfr5hi2MEk6d5EWJTKdxg8jVw==",
+ "dev": true
+ },
+ "node_modules/@types/babel__core": {
+ "version": "7.20.5",
+ "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz",
+ "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==",
+ "dev": true,
+ "dependencies": {
+ "@babel/parser": "^7.20.7",
+ "@babel/types": "^7.20.7",
+ "@types/babel__generator": "*",
+ "@types/babel__template": "*",
+ "@types/babel__traverse": "*"
+ }
+ },
+ "node_modules/@types/babel__generator": {
+ "version": "7.6.8",
+ "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.6.8.tgz",
+ "integrity": "sha512-ASsj+tpEDsEiFr1arWrlN6V3mdfjRMZt6LtK/Vp/kreFLnr5QH5+DhvD5nINYZXzwJvXeGq+05iUXcAzVrqWtw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/types": "^7.0.0"
+ }
+ },
+ "node_modules/@types/babel__template": {
+ "version": "7.4.4",
+ "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz",
+ "integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==",
+ "dev": true,
+ "dependencies": {
+ "@babel/parser": "^7.1.0",
+ "@babel/types": "^7.0.0"
+ }
+ },
+ "node_modules/@types/babel__traverse": {
+ "version": "7.20.6",
+ "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.20.6.tgz",
+ "integrity": "sha512-r1bzfrm0tomOI8g1SzvCaQHo6Lcv6zu0EA+W2kHrt8dyrHQxGzBBL4kdkzIS+jBMV+EYcMAEAqXqYaLJq5rOZg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/types": "^7.20.7"
+ }
+ },
+ "node_modules/@types/cookie": {
+ "version": "0.6.0",
+ "resolved": "https://registry.npmjs.org/@types/cookie/-/cookie-0.6.0.tgz",
+ "integrity": "sha512-4Kh9a6B2bQciAhf7FSuMRRkUWecJgJu9nPnx3yzpsfXX/c50REIqpHY4C82bXP90qrLtXtkDxTZosYO3UpOwlA==",
+ "dev": true
+ },
+ "node_modules/@types/d3-array": {
+ "version": "3.2.1",
+ "resolved": "https://registry.npmjs.org/@types/d3-array/-/d3-array-3.2.1.tgz",
+ "integrity": "sha512-Y2Jn2idRrLzUfAKV2LyRImR+y4oa2AntrgID95SHJxuMUrkNXmanDSed71sRNZysveJVt1hLLemQZIady0FpEg=="
+ },
+ "node_modules/@types/d3-color": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/@types/d3-color/-/d3-color-3.1.3.tgz",
+ "integrity": "sha512-iO90scth9WAbmgv7ogoq57O9YpKmFBbmoEoCHDB2xMBY0+/KVrqAaCDyCE16dUspeOvIxFFRI+0sEtqDqy2b4A=="
+ },
+ "node_modules/@types/d3-ease": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/@types/d3-ease/-/d3-ease-3.0.2.tgz",
+ "integrity": "sha512-NcV1JjO5oDzoK26oMzbILE6HW7uVXOHLQvHshBUW4UMdZGfiY6v5BeQwh9a9tCzv+CeefZQHJt5SRgK154RtiA=="
+ },
+ "node_modules/@types/d3-interpolate": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/@types/d3-interpolate/-/d3-interpolate-3.0.4.tgz",
+ "integrity": "sha512-mgLPETlrpVV1YRJIglr4Ez47g7Yxjl1lj7YKsiMCb27VJH9W8NVM6Bb9d8kkpG/uAQS5AmbA48q2IAolKKo1MA==",
+ "dependencies": {
+ "@types/d3-color": "*"
+ }
+ },
+ "node_modules/@types/d3-path": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/@types/d3-path/-/d3-path-3.1.0.tgz",
+ "integrity": "sha512-P2dlU/q51fkOc/Gfl3Ul9kicV7l+ra934qBFXCFhrZMOL6du1TM0pm1ThYvENukyOn5h9v+yMJ9Fn5JK4QozrQ=="
+ },
+ "node_modules/@types/d3-scale": {
+ "version": "4.0.8",
+ "resolved": "https://registry.npmjs.org/@types/d3-scale/-/d3-scale-4.0.8.tgz",
+ "integrity": "sha512-gkK1VVTr5iNiYJ7vWDI+yUFFlszhNMtVeneJ6lUTKPjprsvLLI9/tgEGiXJOnlINJA8FyA88gfnQsHbybVZrYQ==",
+ "dependencies": {
+ "@types/d3-time": "*"
+ }
+ },
+ "node_modules/@types/d3-shape": {
+ "version": "3.1.6",
+ "resolved": "https://registry.npmjs.org/@types/d3-shape/-/d3-shape-3.1.6.tgz",
+ "integrity": "sha512-5KKk5aKGu2I+O6SONMYSNflgiP0WfZIQvVUMan50wHsLG1G94JlxEVnCpQARfTtzytuY0p/9PXXZb3I7giofIA==",
+ "dependencies": {
+ "@types/d3-path": "*"
+ }
+ },
+ "node_modules/@types/d3-time": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/@types/d3-time/-/d3-time-3.0.3.tgz",
+ "integrity": "sha512-2p6olUZ4w3s+07q3Tm2dbiMZy5pCDfYwtLXXHUnVzXgQlZ/OyPtUz6OL382BkOuGlLXqfT+wqv8Fw2v8/0geBw=="
+ },
+ "node_modules/@types/d3-timer": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/@types/d3-timer/-/d3-timer-3.0.2.tgz",
+ "integrity": "sha512-Ps3T8E8dZDam6fUyNiMkekK3XUsaUEik+idO9/YjPtfj2qruF8tFBXS7XhtE4iIXBLxhmLjP3SXpLhVf21I9Lw=="
+ },
+ "node_modules/@types/doctrine": {
+ "version": "0.0.9",
+ "resolved": "https://registry.npmjs.org/@types/doctrine/-/doctrine-0.0.9.tgz",
+ "integrity": "sha512-eOIHzCUSH7SMfonMG1LsC2f8vxBFtho6NGBznK41R84YzPuvSBzrhEps33IsQiOW9+VL6NQ9DbjQJznk/S4uRA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@types/estree": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.6.tgz",
+ "integrity": "sha512-AYnb1nQyY49te+VRAVgmzfcgjYS91mY5P0TKUDCLEM+gNnA+3T6rWITXRLYCpahpqSQbN5cE+gHpnPyXjHWxcw==",
+ "dev": true
+ },
+ "node_modules/@types/glob": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/@types/glob/-/glob-7.2.0.tgz",
+ "integrity": "sha512-ZUxbzKl0IfJILTS6t7ip5fQQM/J3TJYubDm3nMbgubNNYS62eXeUpoLUC8/7fJNiFYHTrGPQn7hspDUzIHX3UA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@types/minimatch": "*",
+ "@types/node": "*"
+ }
+ },
+ "node_modules/@types/json-schema": {
+ "version": "7.0.15",
+ "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz",
+ "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==",
+ "dev": true
+ },
+ "node_modules/@types/mdx": {
+ "version": "2.0.13",
+ "resolved": "https://registry.npmjs.org/@types/mdx/-/mdx-2.0.13.tgz",
+ "integrity": "sha512-+OWZQfAYyio6YkJb3HLxDrvnx6SWWDbC0zVPfBRzUk0/nqoDyf6dNxQi3eArPe8rJ473nobTMQ/8Zk+LxJ+Yuw==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@types/minimatch": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/@types/minimatch/-/minimatch-5.1.2.tgz",
+ "integrity": "sha512-K0VQKziLUWkVKiRVrx4a40iPaxTUefQmjtkQofBkYRcoaaL/8rhwDWww9qWbrgicNOgnpIsMxyNIUM4+n6dUIA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@types/node": {
+ "version": "22.8.6",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-22.8.6.tgz",
+ "integrity": "sha512-tosuJYKrIqjQIlVCM4PEGxOmyg3FCPa/fViuJChnGeEIhjA46oy8FMVoF9su1/v8PNs2a8Q0iFNyOx0uOF91nw==",
+ "dev": true,
+ "dependencies": {
+ "undici-types": "~6.19.8"
+ }
+ },
+ "node_modules/@types/prop-types": {
+ "version": "15.7.13",
+ "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.13.tgz",
+ "integrity": "sha512-hCZTSvwbzWGvhqxp/RqVqwU999pBf2vp7hzIjiYOsl8wqOmUxkQ6ddw1cV3l8811+kdUFus/q4d1Y3E3SyEifA==",
+ "devOptional": true
+ },
+ "node_modules/@types/react": {
+ "version": "18.3.12",
+ "resolved": "https://registry.npmjs.org/@types/react/-/react-18.3.12.tgz",
+ "integrity": "sha512-D2wOSq/d6Agt28q7rSI3jhU7G6aiuzljDGZ2hTZHIkrTLUI+AF3WMeKkEZ9nN2fkBAlcktT6vcZjDFiIhMYEQw==",
+ "devOptional": true,
+ "dependencies": {
+ "@types/prop-types": "*",
+ "csstype": "^3.0.2"
+ }
+ },
+ "node_modules/@types/react-dom": {
+ "version": "18.3.1",
+ "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.3.1.tgz",
+ "integrity": "sha512-qW1Mfv8taImTthu4KoXgDfLuk4bydU6Q/TkADnDWWHwi4NX4BR+LWfTp2sVmTqRrsHvyDDTelgelxJ+SsejKKQ==",
+ "devOptional": true,
+ "dependencies": {
+ "@types/react": "*"
+ }
+ },
+ "node_modules/@types/resolve": {
+ "version": "1.20.6",
+ "resolved": "https://registry.npmjs.org/@types/resolve/-/resolve-1.20.6.tgz",
+ "integrity": "sha512-A4STmOXPhMUtHH+S6ymgE2GiBSMqf4oTvcQZMcHzokuTLVYzXTB8ttjcgxOVaAp2lGwEdzZ0J+cRbbeevQj1UQ==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@types/semver": {
+ "version": "7.5.8",
+ "resolved": "https://registry.npmjs.org/@types/semver/-/semver-7.5.8.tgz",
+ "integrity": "sha512-I8EUhyrgfLrcTkzV3TSsGyl1tSuPrEDzr0yd5m90UgNxQkyDXULk3b6MlQqTCpZpNtWe1K0hzclnZkTcLBe2UQ==",
+ "dev": true
+ },
+ "node_modules/@types/statuses": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/@types/statuses/-/statuses-2.0.5.tgz",
+ "integrity": "sha512-jmIUGWrAiwu3dZpxntxieC+1n/5c3mjrImkmOSQ2NC5uP6cYO4aAZDdSmRcI5C1oiTmqlZGHC+/NmJrKogbP5A==",
+ "dev": true
+ },
+ "node_modules/@types/tough-cookie": {
+ "version": "4.0.5",
+ "resolved": "https://registry.npmjs.org/@types/tough-cookie/-/tough-cookie-4.0.5.tgz",
+ "integrity": "sha512-/Ad8+nIOV7Rl++6f1BdKxFSMgmoqEoYbHRpPcx3JEfv8VRsQe9Z4mCXeJBzxs7mbHY/XOZZuXlRNfhpVPbs6ZA==",
+ "dev": true
+ },
+ "node_modules/@types/uuid": {
+ "version": "9.0.8",
+ "resolved": "https://registry.npmjs.org/@types/uuid/-/uuid-9.0.8.tgz",
+ "integrity": "sha512-jg+97EGIcY9AGHJJRaaPVgetKDsrTgbRjQ5Msgjh/DQKEFl0DtyRr/VCOyD1T2R1MNeWPK/u7JoGhlDZnKBAfA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@typescript-eslint/eslint-plugin": {
+ "version": "8.12.2",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.12.2.tgz",
+ "integrity": "sha512-gQxbxM8mcxBwaEmWdtLCIGLfixBMHhQjBqR8sVWNTPpcj45WlYL2IObS/DNMLH1DBP0n8qz+aiiLTGfopPEebw==",
+ "dev": true,
+ "dependencies": {
+ "@eslint-community/regexpp": "^4.10.0",
+ "@typescript-eslint/scope-manager": "8.12.2",
+ "@typescript-eslint/type-utils": "8.12.2",
+ "@typescript-eslint/utils": "8.12.2",
+ "@typescript-eslint/visitor-keys": "8.12.2",
+ "graphemer": "^1.4.0",
+ "ignore": "^5.3.1",
+ "natural-compare": "^1.4.0",
+ "ts-api-utils": "^1.3.0"
+ },
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ },
+ "peerDependencies": {
+ "@typescript-eslint/parser": "^8.0.0 || ^8.0.0-alpha.0",
+ "eslint": "^8.57.0 || ^9.0.0"
+ },
+ "peerDependenciesMeta": {
+ "typescript": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@typescript-eslint/parser": {
+ "version": "8.12.2",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.12.2.tgz",
+ "integrity": "sha512-MrvlXNfGPLH3Z+r7Tk+Z5moZAc0dzdVjTgUgwsdGweH7lydysQsnSww3nAmsq8blFuRD5VRlAr9YdEFw3e6PBw==",
+ "dev": true,
+ "dependencies": {
+ "@typescript-eslint/scope-manager": "8.12.2",
+ "@typescript-eslint/types": "8.12.2",
+ "@typescript-eslint/typescript-estree": "8.12.2",
+ "@typescript-eslint/visitor-keys": "8.12.2",
+ "debug": "^4.3.4"
+ },
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ },
+ "peerDependencies": {
+ "eslint": "^8.57.0 || ^9.0.0"
+ },
+ "peerDependenciesMeta": {
+ "typescript": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@typescript-eslint/scope-manager": {
+ "version": "8.12.2",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.12.2.tgz",
+ "integrity": "sha512-gPLpLtrj9aMHOvxJkSbDBmbRuYdtiEbnvO25bCMza3DhMjTQw0u7Y1M+YR5JPbMsXXnSPuCf5hfq0nEkQDL/JQ==",
+ "dev": true,
+ "dependencies": {
+ "@typescript-eslint/types": "8.12.2",
+ "@typescript-eslint/visitor-keys": "8.12.2"
+ },
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ }
+ },
+ "node_modules/@typescript-eslint/type-utils": {
+ "version": "8.12.2",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.12.2.tgz",
+ "integrity": "sha512-bwuU4TAogPI+1q/IJSKuD4shBLc/d2vGcRT588q+jzayQyjVK2X6v/fbR4InY2U2sgf8MEvVCqEWUzYzgBNcGQ==",
+ "dev": true,
+ "dependencies": {
+ "@typescript-eslint/typescript-estree": "8.12.2",
+ "@typescript-eslint/utils": "8.12.2",
+ "debug": "^4.3.4",
+ "ts-api-utils": "^1.3.0"
+ },
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ },
+ "peerDependenciesMeta": {
+ "typescript": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@typescript-eslint/types": {
+ "version": "8.12.2",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.12.2.tgz",
+ "integrity": "sha512-VwDwMF1SZ7wPBUZwmMdnDJ6sIFk4K4s+ALKLP6aIQsISkPv8jhiw65sAK6SuWODN/ix+m+HgbYDkH+zLjrzvOA==",
+ "dev": true,
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ }
+ },
+ "node_modules/@typescript-eslint/typescript-estree": {
+ "version": "8.12.2",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.12.2.tgz",
+ "integrity": "sha512-mME5MDwGe30Pq9zKPvyduyU86PH7aixwqYR2grTglAdB+AN8xXQ1vFGpYaUSJ5o5P/5znsSBeNcs5g5/2aQwow==",
+ "dev": true,
+ "dependencies": {
+ "@typescript-eslint/types": "8.12.2",
+ "@typescript-eslint/visitor-keys": "8.12.2",
+ "debug": "^4.3.4",
+ "fast-glob": "^3.3.2",
+ "is-glob": "^4.0.3",
+ "minimatch": "^9.0.4",
+ "semver": "^7.6.0",
+ "ts-api-utils": "^1.3.0"
+ },
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ },
+ "peerDependenciesMeta": {
+ "typescript": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@typescript-eslint/typescript-estree/node_modules/brace-expansion": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
+ "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
+ "dev": true,
+ "dependencies": {
+ "balanced-match": "^1.0.0"
+ }
+ },
+ "node_modules/@typescript-eslint/typescript-estree/node_modules/minimatch": {
+ "version": "9.0.5",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz",
+ "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==",
+ "dev": true,
+ "dependencies": {
+ "brace-expansion": "^2.0.1"
+ },
+ "engines": {
+ "node": ">=16 || 14 >=14.17"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
+ }
+ },
+ "node_modules/@typescript-eslint/typescript-estree/node_modules/semver": {
+ "version": "7.6.3",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.3.tgz",
+ "integrity": "sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==",
+ "dev": true,
+ "bin": {
+ "semver": "bin/semver.js"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/@typescript-eslint/utils": {
+ "version": "8.12.2",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.12.2.tgz",
+ "integrity": "sha512-UTTuDIX3fkfAz6iSVa5rTuSfWIYZ6ATtEocQ/umkRSyC9O919lbZ8dcH7mysshrCdrAM03skJOEYaBugxN+M6A==",
+ "dev": true,
+ "dependencies": {
+ "@eslint-community/eslint-utils": "^4.4.0",
+ "@typescript-eslint/scope-manager": "8.12.2",
+ "@typescript-eslint/types": "8.12.2",
+ "@typescript-eslint/typescript-estree": "8.12.2"
+ },
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ },
+ "peerDependencies": {
+ "eslint": "^8.57.0 || ^9.0.0"
+ }
+ },
+ "node_modules/@typescript-eslint/visitor-keys": {
+ "version": "8.12.2",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.12.2.tgz",
+ "integrity": "sha512-PChz8UaKQAVNHghsHcPyx1OMHoFRUEA7rJSK/mDhdq85bk+PLsUHUBqTQTFt18VJZbmxBovM65fezlheQRsSDA==",
+ "dev": true,
+ "dependencies": {
+ "@typescript-eslint/types": "8.12.2",
+ "eslint-visitor-keys": "^3.4.3"
+ },
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ }
+ },
+ "node_modules/@typescript-eslint/visitor-keys/node_modules/eslint-visitor-keys": {
+ "version": "3.4.3",
+ "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz",
+ "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==",
+ "dev": true,
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/eslint"
+ }
+ },
+ "node_modules/@uiw/codemirror-extensions-basic-setup": {
+ "version": "4.23.6",
+ "resolved": "https://registry.npmjs.org/@uiw/codemirror-extensions-basic-setup/-/codemirror-extensions-basic-setup-4.23.6.tgz",
+ "integrity": "sha512-bvtq8IOvdkLJMhoJBRGPEzU51fMpPDwEhcAHp9xCR05MtbIokQgsnLXrmD1aZm6e7s/3q47H+qdSfAAkR5MkLA==",
+ "dependencies": {
+ "@codemirror/autocomplete": "^6.0.0",
+ "@codemirror/commands": "^6.0.0",
+ "@codemirror/language": "^6.0.0",
+ "@codemirror/lint": "^6.0.0",
+ "@codemirror/search": "^6.0.0",
+ "@codemirror/state": "^6.0.0",
+ "@codemirror/view": "^6.0.0"
+ },
+ "funding": {
+ "url": "https://jaywcjlove.github.io/#/sponsor"
+ },
+ "peerDependencies": {
+ "@codemirror/autocomplete": ">=6.0.0",
+ "@codemirror/commands": ">=6.0.0",
+ "@codemirror/language": ">=6.0.0",
+ "@codemirror/lint": ">=6.0.0",
+ "@codemirror/search": ">=6.0.0",
+ "@codemirror/state": ">=6.0.0",
+ "@codemirror/view": ">=6.0.0"
+ }
+ },
+ "node_modules/@uiw/react-codemirror": {
+ "version": "4.23.6",
+ "resolved": "https://registry.npmjs.org/@uiw/react-codemirror/-/react-codemirror-4.23.6.tgz",
+ "integrity": "sha512-caYKGV6TfGLRV1HHD3p0G3FiVzKL1go7wes5XT2nWjB0+dTdyzyb81MKRSacptgZcotujfNO6QXn65uhETRAMw==",
+ "dependencies": {
+ "@babel/runtime": "^7.18.6",
+ "@codemirror/commands": "^6.1.0",
+ "@codemirror/state": "^6.1.1",
+ "@codemirror/theme-one-dark": "^6.0.0",
+ "@uiw/codemirror-extensions-basic-setup": "4.23.6",
+ "codemirror": "^6.0.0"
+ },
+ "funding": {
+ "url": "https://jaywcjlove.github.io/#/sponsor"
+ },
+ "peerDependencies": {
+ "@babel/runtime": ">=7.11.0",
+ "@codemirror/state": ">=6.0.0",
+ "@codemirror/theme-one-dark": ">=6.0.0",
+ "@codemirror/view": ">=6.0.0",
+ "codemirror": ">=6.0.0",
+ "react": ">=16.8.0",
+ "react-dom": ">=16.8.0"
+ }
+ },
+ "node_modules/@vitejs/plugin-react-swc": {
+ "version": "3.7.1",
+ "resolved": "https://registry.npmjs.org/@vitejs/plugin-react-swc/-/plugin-react-swc-3.7.1.tgz",
+ "integrity": "sha512-vgWOY0i1EROUK0Ctg1hwhtC3SdcDjZcdit4Ups4aPkDcB1jYhmo+RMYWY87cmXMhvtD5uf8lV89j2w16vkdSVg==",
+ "dev": true,
+ "dependencies": {
+ "@swc/core": "^1.7.26"
+ },
+ "peerDependencies": {
+ "vite": "^4 || ^5"
+ }
+ },
+ "node_modules/@vitest/expect": {
+ "version": "2.1.4",
+ "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-2.1.4.tgz",
+ "integrity": "sha512-DOETT0Oh1avie/D/o2sgMHGrzYUFFo3zqESB2Hn70z6QB1HrS2IQ9z5DfyTqU8sg4Bpu13zZe9V4+UTNQlUeQA==",
+ "dev": true,
+ "dependencies": {
+ "@vitest/spy": "2.1.4",
+ "@vitest/utils": "2.1.4",
+ "chai": "^5.1.2",
+ "tinyrainbow": "^1.2.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/vitest"
+ }
+ },
+ "node_modules/@vitest/mocker": {
+ "version": "2.1.4",
+ "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-2.1.4.tgz",
+ "integrity": "sha512-Ky/O1Lc0QBbutJdW0rqLeFNbuLEyS+mIPiNdlVlp2/yhJ0SbyYqObS5IHdhferJud8MbbwMnexg4jordE5cCoQ==",
+ "dev": true,
+ "dependencies": {
+ "@vitest/spy": "2.1.4",
+ "estree-walker": "^3.0.3",
+ "magic-string": "^0.30.12"
+ },
+ "funding": {
+ "url": "https://opencollective.com/vitest"
+ },
+ "peerDependencies": {
+ "msw": "^2.4.9",
+ "vite": "^5.0.0"
+ },
+ "peerDependenciesMeta": {
+ "msw": {
+ "optional": true
+ },
+ "vite": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@vitest/pretty-format": {
+ "version": "2.1.4",
+ "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-2.1.4.tgz",
+ "integrity": "sha512-L95zIAkEuTDbUX1IsjRl+vyBSLh3PwLLgKpghl37aCK9Jvw0iP+wKwIFhfjdUtA2myLgjrG6VU6JCFLv8q/3Ww==",
+ "dev": true,
+ "dependencies": {
+ "tinyrainbow": "^1.2.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/vitest"
+ }
+ },
+ "node_modules/@vitest/runner": {
+ "version": "2.1.4",
+ "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-2.1.4.tgz",
+ "integrity": "sha512-sKRautINI9XICAMl2bjxQM8VfCMTB0EbsBc/EDFA57V6UQevEKY/TOPOF5nzcvCALltiLfXWbq4MaAwWx/YxIA==",
+ "dev": true,
+ "dependencies": {
+ "@vitest/utils": "2.1.4",
+ "pathe": "^1.1.2"
+ },
+ "funding": {
+ "url": "https://opencollective.com/vitest"
+ }
+ },
+ "node_modules/@vitest/snapshot": {
+ "version": "2.1.4",
+ "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-2.1.4.tgz",
+ "integrity": "sha512-3Kab14fn/5QZRog5BPj6Rs8dc4B+mim27XaKWFWHWA87R56AKjHTGcBFKpvZKDzC4u5Wd0w/qKsUIio3KzWW4Q==",
+ "dev": true,
+ "dependencies": {
+ "@vitest/pretty-format": "2.1.4",
+ "magic-string": "^0.30.12",
+ "pathe": "^1.1.2"
+ },
+ "funding": {
+ "url": "https://opencollective.com/vitest"
+ }
+ },
+ "node_modules/@vitest/spy": {
+ "version": "2.1.4",
+ "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-2.1.4.tgz",
+ "integrity": "sha512-4JOxa+UAizJgpZfaCPKK2smq9d8mmjZVPMt2kOsg/R8QkoRzydHH1qHxIYNvr1zlEaFj4SXiaaJWxq/LPLKaLg==",
+ "dev": true,
+ "dependencies": {
+ "tinyspy": "^3.0.2"
+ },
+ "funding": {
+ "url": "https://opencollective.com/vitest"
+ }
+ },
+ "node_modules/@vitest/utils": {
+ "version": "2.1.4",
+ "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-2.1.4.tgz",
+ "integrity": "sha512-MXDnZn0Awl2S86PSNIim5PWXgIAx8CIkzu35mBdSApUip6RFOGXBCf3YFyeEu8n1IHk4bWD46DeYFu9mQlFIRg==",
+ "dev": true,
+ "dependencies": {
+ "@vitest/pretty-format": "2.1.4",
+ "loupe": "^3.1.2",
+ "tinyrainbow": "^1.2.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/vitest"
+ }
+ },
+ "node_modules/acorn": {
+ "version": "8.14.0",
+ "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.14.0.tgz",
+ "integrity": "sha512-cl669nCJTZBsL97OF4kUQm5g5hC2uihk0NxY3WENAC0TYdILVkAyHymAntgxGkl7K+t0cXIrH5siy5S4XkFycA==",
+ "dev": true,
+ "bin": {
+ "acorn": "bin/acorn"
+ },
+ "engines": {
+ "node": ">=0.4.0"
+ }
+ },
+ "node_modules/acorn-jsx": {
+ "version": "5.3.2",
+ "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz",
+ "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==",
+ "dev": true,
+ "peerDependencies": {
+ "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0"
+ }
+ },
+ "node_modules/agent-base": {
+ "version": "7.1.1",
+ "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.1.tgz",
+ "integrity": "sha512-H0TSyFNDMomMNJQBn8wFV5YC/2eJ+VXECwOadZJT554xP6cODZHPX3H9QMQECxvrgiSOP1pHjy1sMWQVYJOUOA==",
+ "dev": true,
+ "dependencies": {
+ "debug": "^4.3.4"
+ },
+ "engines": {
+ "node": ">= 14"
+ }
+ },
+ "node_modules/ajv": {
+ "version": "6.12.6",
+ "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz",
+ "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==",
+ "dev": true,
+ "dependencies": {
+ "fast-deep-equal": "^3.1.1",
+ "fast-json-stable-stringify": "^2.0.0",
+ "json-schema-traverse": "^0.4.1",
+ "uri-js": "^4.2.2"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/epoberezkin"
+ }
+ },
+ "node_modules/ansi-escapes": {
+ "version": "4.3.2",
+ "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz",
+ "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==",
+ "dev": true,
+ "dependencies": {
+ "type-fest": "^0.21.3"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/ansi-escapes/node_modules/type-fest": {
+ "version": "0.21.3",
+ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz",
+ "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==",
+ "dev": true,
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/ansi-regex": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.1.0.tgz",
+ "integrity": "sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA==",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/ansi-regex?sponsor=1"
+ }
+ },
+ "node_modules/ansi-styles": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
+ "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
+ "dependencies": {
+ "color-convert": "^2.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/ansi-styles?sponsor=1"
+ }
+ },
+ "node_modules/any-promise": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz",
+ "integrity": "sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A=="
+ },
+ "node_modules/anymatch": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz",
+ "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==",
+ "dependencies": {
+ "normalize-path": "^3.0.0",
+ "picomatch": "^2.0.4"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/arg": {
+ "version": "5.0.2",
+ "resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz",
+ "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg=="
+ },
+ "node_modules/argparse": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz",
+ "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==",
+ "dev": true
+ },
+ "node_modules/aria-hidden": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/aria-hidden/-/aria-hidden-1.2.4.tgz",
+ "integrity": "sha512-y+CcFFwelSXpLZk/7fMB2mUbGtX9lKycf1MWJ7CaTIERyitVlyQx6C+sxcROU2BAJ24OiZyK+8wj2i8AlBoS3A==",
+ "dependencies": {
+ "tslib": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/aria-query": {
+ "version": "5.3.0",
+ "resolved": "https://registry.npmjs.org/aria-query/-/aria-query-5.3.0.tgz",
+ "integrity": "sha512-b0P0sZPKtyu8HkeRAfCq0IfURZK+SuwMjY1UXGBU27wpAiTwQAIlq56IbIO+ytk/JjS1fMR14ee5WBBfKi5J6A==",
+ "dev": true,
+ "dependencies": {
+ "dequal": "^2.0.3"
+ }
+ },
+ "node_modules/array-buffer-byte-length": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/array-buffer-byte-length/-/array-buffer-byte-length-1.0.1.tgz",
+ "integrity": "sha512-ahC5W1xgou+KTXix4sAO8Ki12Q+jf4i0+tmk3sC+zgcynshkHxzpXdImBehiUYKKKDwvfFiJl1tZt6ewscS1Mg==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.5",
+ "is-array-buffer": "^3.0.4"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/array-includes": {
+ "version": "3.1.8",
+ "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.8.tgz",
+ "integrity": "sha512-itaWrbYbqpGXkGhZPGUulwnhVf5Hpy1xiCFsGqyIGglbBxmG5vSjxQen3/WGOjPpNEv1RtBLKxbmVXm8HpJStQ==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.7",
+ "define-properties": "^1.2.1",
+ "es-abstract": "^1.23.2",
+ "es-object-atoms": "^1.0.0",
+ "get-intrinsic": "^1.2.4",
+ "is-string": "^1.0.7"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/array-union": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz",
+ "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/array.prototype.findlast": {
+ "version": "1.2.5",
+ "resolved": "https://registry.npmjs.org/array.prototype.findlast/-/array.prototype.findlast-1.2.5.tgz",
+ "integrity": "sha512-CVvd6FHg1Z3POpBLxO6E6zr+rSKEQ9L6rZHAaY7lLfhKsWYUBBOuMs0e9o24oopj6H+geRCX0YJ+TJLBK2eHyQ==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.7",
+ "define-properties": "^1.2.1",
+ "es-abstract": "^1.23.2",
+ "es-errors": "^1.3.0",
+ "es-object-atoms": "^1.0.0",
+ "es-shim-unscopables": "^1.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/array.prototype.flat": {
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.3.2.tgz",
+ "integrity": "sha512-djYB+Zx2vLewY8RWlNCUdHjDXs2XOgm602S9E7P/UpHgfeHL00cRiIF+IN/G/aUJ7kGPb6yO/ErDI5V2s8iycA==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.2.0",
+ "es-abstract": "^1.22.1",
+ "es-shim-unscopables": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/array.prototype.flatmap": {
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/array.prototype.flatmap/-/array.prototype.flatmap-1.3.2.tgz",
+ "integrity": "sha512-Ewyx0c9PmpcsByhSW4r+9zDU7sGjFc86qf/kKtuSCRdhfbk0SNLLkaT5qvcHnRGgc5NP/ly/y+qkXkqONX54CQ==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.2.0",
+ "es-abstract": "^1.22.1",
+ "es-shim-unscopables": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/array.prototype.tosorted": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/array.prototype.tosorted/-/array.prototype.tosorted-1.1.4.tgz",
+ "integrity": "sha512-p6Fx8B7b7ZhL/gmUsAy0D15WhvDccw3mnGNbZpi3pmeJdxtWsj2jEaI4Y6oo3XiHfzuSgPwKc04MYt6KgvC/wA==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.7",
+ "define-properties": "^1.2.1",
+ "es-abstract": "^1.23.3",
+ "es-errors": "^1.3.0",
+ "es-shim-unscopables": "^1.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/arraybuffer.prototype.slice": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/arraybuffer.prototype.slice/-/arraybuffer.prototype.slice-1.0.3.tgz",
+ "integrity": "sha512-bMxMKAjg13EBSVscxTaYA4mRc5t1UAXa2kXiGTNfZ079HIWXEkKmkgFrh/nJqamaLSrXO5H4WFFkPEaLJWbs3A==",
+ "dev": true,
+ "dependencies": {
+ "array-buffer-byte-length": "^1.0.1",
+ "call-bind": "^1.0.5",
+ "define-properties": "^1.2.1",
+ "es-abstract": "^1.22.3",
+ "es-errors": "^1.2.1",
+ "get-intrinsic": "^1.2.3",
+ "is-array-buffer": "^3.0.4",
+ "is-shared-array-buffer": "^1.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/assertion-error": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz",
+ "integrity": "sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==",
+ "dev": true,
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/ast-types": {
+ "version": "0.16.1",
+ "resolved": "https://registry.npmjs.org/ast-types/-/ast-types-0.16.1.tgz",
+ "integrity": "sha512-6t10qk83GOG8p0vKmaCr8eiilZwO171AvbROMtvvNiwrTly62t+7XkA8RdIIVbpMhCASAsxgAzdRSwh6nw/5Dg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "tslib": "^2.0.1"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/asynckit": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz",
+ "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==",
+ "dev": true
+ },
+ "node_modules/autoprefixer": {
+ "version": "10.4.20",
+ "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.20.tgz",
+ "integrity": "sha512-XY25y5xSv/wEoqzDyXXME4AFfkZI0P23z6Fs3YgymDnKJkCGOnkL0iTxCa85UTqaSgfcqyf3UA6+c7wUvx/16g==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/postcss/"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/autoprefixer"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "dependencies": {
+ "browserslist": "^4.23.3",
+ "caniuse-lite": "^1.0.30001646",
+ "fraction.js": "^4.3.7",
+ "normalize-range": "^0.1.2",
+ "picocolors": "^1.0.1",
+ "postcss-value-parser": "^4.2.0"
+ },
+ "bin": {
+ "autoprefixer": "bin/autoprefixer"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14"
+ },
+ "peerDependencies": {
+ "postcss": "^8.1.0"
+ }
+ },
+ "node_modules/available-typed-arrays": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.7.tgz",
+ "integrity": "sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ==",
+ "dev": true,
+ "dependencies": {
+ "possible-typed-array-names": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/babel-dead-code-elimination": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/babel-dead-code-elimination/-/babel-dead-code-elimination-1.0.6.tgz",
+ "integrity": "sha512-JxFi9qyRJpN0LjEbbjbN8g0ux71Qppn9R8Qe3k6QzHg2CaKsbUQtbn307LQGiDLGjV6JCtEFqfxzVig9MyDCHQ==",
+ "dev": true,
+ "dependencies": {
+ "@babel/core": "^7.23.7",
+ "@babel/parser": "^7.23.6",
+ "@babel/traverse": "^7.23.7",
+ "@babel/types": "^7.23.6"
+ }
+ },
+ "node_modules/balanced-match": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
+ "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw=="
+ },
+ "node_modules/better-opn": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/better-opn/-/better-opn-3.0.2.tgz",
+ "integrity": "sha512-aVNobHnJqLiUelTaHat9DZ1qM2w0C0Eym4LPI/3JxOnSokGVdsl1T1kN7TFvsEAD8G47A6VKQ0TVHqbBnYMJlQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "open": "^8.0.4"
+ },
+ "engines": {
+ "node": ">=12.0.0"
+ }
+ },
+ "node_modules/binary-extensions": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz",
+ "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==",
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/brace-expansion": {
+ "version": "1.1.11",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
+ "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
+ "dev": true,
+ "dependencies": {
+ "balanced-match": "^1.0.0",
+ "concat-map": "0.0.1"
+ }
+ },
+ "node_modules/braces": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz",
+ "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==",
+ "dependencies": {
+ "fill-range": "^7.1.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/browser-assert": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/browser-assert/-/browser-assert-1.2.1.tgz",
+ "integrity": "sha512-nfulgvOR6S4gt9UKCeGJOuSGBPGiFT6oQ/2UBnvTY/5aQ1PnksW72fhZkM30DzoRRv2WpwZf1vHHEr3mtuXIWQ==",
+ "dev": true
+ },
+ "node_modules/browserslist": {
+ "version": "4.24.2",
+ "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.24.2.tgz",
+ "integrity": "sha512-ZIc+Q62revdMcqC6aChtW4jz3My3klmCO1fEmINZY/8J3EpBg5/A/D0AKmBveUh6pgoeycoMkVMko84tuYS+Gg==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/browserslist"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "dependencies": {
+ "caniuse-lite": "^1.0.30001669",
+ "electron-to-chromium": "^1.5.41",
+ "node-releases": "^2.0.18",
+ "update-browserslist-db": "^1.1.1"
+ },
+ "bin": {
+ "browserslist": "cli.js"
+ },
+ "engines": {
+ "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7"
+ }
+ },
+ "node_modules/cac": {
+ "version": "6.7.14",
+ "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz",
+ "integrity": "sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/call-bind": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz",
+ "integrity": "sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==",
+ "dev": true,
+ "dependencies": {
+ "es-define-property": "^1.0.0",
+ "es-errors": "^1.3.0",
+ "function-bind": "^1.1.2",
+ "get-intrinsic": "^1.2.4",
+ "set-function-length": "^1.2.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/callsites": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz",
+ "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/camelcase-css": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz",
+ "integrity": "sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==",
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/caniuse-lite": {
+ "version": "1.0.30001676",
+ "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001676.tgz",
+ "integrity": "sha512-Qz6zwGCiPghQXGJvgQAem79esjitvJ+CxSbSQkW9H/UX5hg8XM88d4lp2W+MEQ81j+Hip58Il+jGVdazk1z9cw==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/caniuse-lite"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ]
+ },
+ "node_modules/chai": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/chai/-/chai-5.1.2.tgz",
+ "integrity": "sha512-aGtmf24DW6MLHHG5gCx4zaI3uBq3KRtxeVs0DjFH6Z0rDNbsvTxFASFvdj79pxjxZ8/5u3PIiN3IwEIQkiiuPw==",
+ "dev": true,
+ "dependencies": {
+ "assertion-error": "^2.0.1",
+ "check-error": "^2.1.1",
+ "deep-eql": "^5.0.1",
+ "loupe": "^3.1.0",
+ "pathval": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/chalk": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz",
+ "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==",
+ "dev": true,
+ "dependencies": {
+ "ansi-styles": "^4.1.0",
+ "supports-color": "^7.1.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/chalk?sponsor=1"
+ }
+ },
+ "node_modules/check-error": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/check-error/-/check-error-2.1.1.tgz",
+ "integrity": "sha512-OAlb+T7V4Op9OwdkjmguYRqncdlx5JiofwOAUkmTF+jNdHwzTaTs4sRAGpzLF3oOz5xAyDGrPgeIDFQmDOTiJw==",
+ "dev": true,
+ "engines": {
+ "node": ">= 16"
+ }
+ },
+ "node_modules/chokidar": {
+ "version": "3.6.0",
+ "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz",
+ "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==",
+ "dependencies": {
+ "anymatch": "~3.1.2",
+ "braces": "~3.0.2",
+ "glob-parent": "~5.1.2",
+ "is-binary-path": "~2.1.0",
+ "is-glob": "~4.0.1",
+ "normalize-path": "~3.0.0",
+ "readdirp": "~3.6.0"
+ },
+ "engines": {
+ "node": ">= 8.10.0"
+ },
+ "funding": {
+ "url": "https://paulmillr.com/funding/"
+ },
+ "optionalDependencies": {
+ "fsevents": "~2.3.2"
+ }
+ },
+ "node_modules/class-variance-authority": {
+ "version": "0.7.0",
+ "resolved": "https://registry.npmjs.org/class-variance-authority/-/class-variance-authority-0.7.0.tgz",
+ "integrity": "sha512-jFI8IQw4hczaL4ALINxqLEXQbWcNjoSkloa4IaufXCJr6QawJyw7tuRysRsrE8w2p/4gGaxKIt/hX3qz/IbD1A==",
+ "dependencies": {
+ "clsx": "2.0.0"
+ },
+ "funding": {
+ "url": "https://joebell.co.uk"
+ }
+ },
+ "node_modules/class-variance-authority/node_modules/clsx": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.0.0.tgz",
+ "integrity": "sha512-rQ1+kcj+ttHG0MKVGBUXwayCCF1oh39BF5COIpRzuCEv8Mwjv0XucrI2ExNTOn9IlLifGClWQcU9BrZORvtw6Q==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/cli-width": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/cli-width/-/cli-width-4.1.0.tgz",
+ "integrity": "sha512-ouuZd4/dm2Sw5Gmqy6bGyNNNe1qt9RpmxveLSO7KcgsTnU7RXfsw+/bukWGo1abgBiMAic068rclZsO4IWmmxQ==",
+ "dev": true,
+ "engines": {
+ "node": ">= 12"
+ }
+ },
+ "node_modules/cliui": {
+ "version": "8.0.1",
+ "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz",
+ "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==",
+ "dev": true,
+ "dependencies": {
+ "string-width": "^4.2.0",
+ "strip-ansi": "^6.0.1",
+ "wrap-ansi": "^7.0.0"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/cliui/node_modules/ansi-regex": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
+ "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/cliui/node_modules/emoji-regex": {
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
+ "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
+ "dev": true
+ },
+ "node_modules/cliui/node_modules/string-width": {
+ "version": "4.2.3",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
+ "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
+ "dev": true,
+ "dependencies": {
+ "emoji-regex": "^8.0.0",
+ "is-fullwidth-code-point": "^3.0.0",
+ "strip-ansi": "^6.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/cliui/node_modules/strip-ansi": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
+ "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
+ "dev": true,
+ "dependencies": {
+ "ansi-regex": "^5.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/cliui/node_modules/wrap-ansi": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
+ "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==",
+ "dev": true,
+ "dependencies": {
+ "ansi-styles": "^4.0.0",
+ "string-width": "^4.1.0",
+ "strip-ansi": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/wrap-ansi?sponsor=1"
+ }
+ },
+ "node_modules/clsx": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz",
+ "integrity": "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/codemirror": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/codemirror/-/codemirror-6.0.1.tgz",
+ "integrity": "sha512-J8j+nZ+CdWmIeFIGXEFbFPtpiYacFMDR8GlHK3IyHQJMCaVRfGx9NT+Hxivv1ckLWPvNdZqndbr/7lVhrf/Svg==",
+ "dependencies": {
+ "@codemirror/autocomplete": "^6.0.0",
+ "@codemirror/commands": "^6.0.0",
+ "@codemirror/language": "^6.0.0",
+ "@codemirror/lint": "^6.0.0",
+ "@codemirror/search": "^6.0.0",
+ "@codemirror/state": "^6.0.0",
+ "@codemirror/view": "^6.0.0"
+ }
+ },
+ "node_modules/color-convert": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
+ "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
+ "dependencies": {
+ "color-name": "~1.1.4"
+ },
+ "engines": {
+ "node": ">=7.0.0"
+ }
+ },
+ "node_modules/color-name": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
+ "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="
+ },
+ "node_modules/combined-stream": {
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz",
+ "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==",
+ "dev": true,
+ "dependencies": {
+ "delayed-stream": "~1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/commander": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz",
+ "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==",
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/concat-map": {
+ "version": "0.0.1",
+ "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
+ "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==",
+ "dev": true
+ },
+ "node_modules/convert-source-map": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz",
+ "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==",
+ "dev": true
+ },
+ "node_modules/cookie": {
+ "version": "0.7.2",
+ "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.2.tgz",
+ "integrity": "sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/crelt": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/crelt/-/crelt-1.0.6.tgz",
+ "integrity": "sha512-VQ2MBenTq1fWZUH9DJNGti7kKv6EeAuYr3cLwxUWhIu1baTaXh4Ib5W2CqHVqib4/MqbYGJqiL3Zb8GJZr3l4g=="
+ },
+ "node_modules/cross-spawn": {
+ "version": "7.0.6",
+ "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz",
+ "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==",
+ "dependencies": {
+ "path-key": "^3.1.0",
+ "shebang-command": "^2.0.0",
+ "which": "^2.0.1"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/css.escape": {
+ "version": "1.5.1",
+ "resolved": "https://registry.npmjs.org/css.escape/-/css.escape-1.5.1.tgz",
+ "integrity": "sha512-YUifsXXuknHlUsmlgyY0PKzgPOr7/FjCePfHNt0jxm83wHZi44VDMQ7/fGNkjY3/jV1MC+1CmZbaHzugyeRtpg==",
+ "dev": true
+ },
+ "node_modules/cssesc": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz",
+ "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==",
+ "bin": {
+ "cssesc": "bin/cssesc"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/cssstyle": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/cssstyle/-/cssstyle-4.1.0.tgz",
+ "integrity": "sha512-h66W1URKpBS5YMI/V8PyXvTMFT8SupJ1IzoIV8IeBC/ji8WVmrO8dGlTi+2dh6whmdk6BiKJLD/ZBkhWbcg6nA==",
+ "dev": true,
+ "dependencies": {
+ "rrweb-cssom": "^0.7.1"
+ },
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/csstype": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz",
+ "integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw=="
+ },
+ "node_modules/d3-array": {
+ "version": "3.2.4",
+ "resolved": "https://registry.npmjs.org/d3-array/-/d3-array-3.2.4.tgz",
+ "integrity": "sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg==",
+ "dependencies": {
+ "internmap": "1 - 2"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-color": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/d3-color/-/d3-color-3.1.0.tgz",
+ "integrity": "sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-ease": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-ease/-/d3-ease-3.0.1.tgz",
+ "integrity": "sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w==",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-format": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/d3-format/-/d3-format-3.1.0.tgz",
+ "integrity": "sha512-YyUI6AEuY/Wpt8KWLgZHsIU86atmikuoOmCfommt0LYHiQSPjvX2AcFc38PX0CBpr2RCyZhjex+NS/LPOv6YqA==",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-interpolate": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-interpolate/-/d3-interpolate-3.0.1.tgz",
+ "integrity": "sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==",
+ "dependencies": {
+ "d3-color": "1 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-path": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/d3-path/-/d3-path-3.1.0.tgz",
+ "integrity": "sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ==",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-scale": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/d3-scale/-/d3-scale-4.0.2.tgz",
+ "integrity": "sha512-GZW464g1SH7ag3Y7hXjf8RoUuAFIqklOAq3MRl4OaWabTFJY9PN/E1YklhXLh+OQ3fM9yS2nOkCoS+WLZ6kvxQ==",
+ "dependencies": {
+ "d3-array": "2.10.0 - 3",
+ "d3-format": "1 - 3",
+ "d3-interpolate": "1.2.0 - 3",
+ "d3-time": "2.1.1 - 3",
+ "d3-time-format": "2 - 4"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-shape": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/d3-shape/-/d3-shape-3.2.0.tgz",
+ "integrity": "sha512-SaLBuwGm3MOViRq2ABk3eLoxwZELpH6zhl3FbAoJ7Vm1gofKx6El1Ib5z23NUEhF9AsGl7y+dzLe5Cw2AArGTA==",
+ "dependencies": {
+ "d3-path": "^3.1.0"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-time": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/d3-time/-/d3-time-3.1.0.tgz",
+ "integrity": "sha512-VqKjzBLejbSMT4IgbmVgDjpkYrNWUYJnbCGo874u7MMKIWsILRX+OpX/gTk8MqjpT1A/c6HY2dCA77ZN0lkQ2Q==",
+ "dependencies": {
+ "d3-array": "2 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-time-format": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/d3-time-format/-/d3-time-format-4.1.0.tgz",
+ "integrity": "sha512-dJxPBlzC7NugB2PDLwo9Q8JiTR3M3e4/XANkreKSUxF8vvXKqm1Yfq4Q5dl8budlunRVlUUaDUgFt7eA8D6NLg==",
+ "dependencies": {
+ "d3-time": "1 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-timer": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-timer/-/d3-timer-3.0.1.tgz",
+ "integrity": "sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA==",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/data-urls": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/data-urls/-/data-urls-5.0.0.tgz",
+ "integrity": "sha512-ZYP5VBHshaDAiVZxjbRVcFJpc+4xGgT0bK3vzy1HLN8jTO975HEbuYzZJcHoQEY5K1a0z8YayJkyVETa08eNTg==",
+ "dev": true,
+ "dependencies": {
+ "whatwg-mimetype": "^4.0.0",
+ "whatwg-url": "^14.0.0"
+ },
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/data-view-buffer": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/data-view-buffer/-/data-view-buffer-1.0.1.tgz",
+ "integrity": "sha512-0lht7OugA5x3iJLOWFhWK/5ehONdprk0ISXqVFn/NFrDu+cuc8iADFrGQz5BnRK7LLU3JmkbXSxaqX+/mXYtUA==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.6",
+ "es-errors": "^1.3.0",
+ "is-data-view": "^1.0.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/data-view-byte-length": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/data-view-byte-length/-/data-view-byte-length-1.0.1.tgz",
+ "integrity": "sha512-4J7wRJD3ABAzr8wP+OcIcqq2dlUKp4DVflx++hs5h5ZKydWMI6/D/fAot+yh6g2tHh8fLFTvNOaVN357NvSrOQ==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.7",
+ "es-errors": "^1.3.0",
+ "is-data-view": "^1.0.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/data-view-byte-offset": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/data-view-byte-offset/-/data-view-byte-offset-1.0.0.tgz",
+ "integrity": "sha512-t/Ygsytq+R995EJ5PZlD4Cu56sWa8InXySaViRzw9apusqsOO2bQP+SbYzAhR0pFKoB+43lYy8rWban9JSuXnA==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.6",
+ "es-errors": "^1.3.0",
+ "is-data-view": "^1.0.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/date-fns": {
+ "version": "3.6.0",
+ "resolved": "https://registry.npmjs.org/date-fns/-/date-fns-3.6.0.tgz",
+ "integrity": "sha512-fRHTG8g/Gif+kSh50gaGEdToemgfj74aRX3swtiouboip5JDLAyDE9F11nHMIcvOaXeOC6D7SpNhi7uFyB7Uww==",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/kossnocorp"
+ }
+ },
+ "node_modules/date-fns-tz": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/date-fns-tz/-/date-fns-tz-3.2.0.tgz",
+ "integrity": "sha512-sg8HqoTEulcbbbVXeg84u5UnlsQa8GS5QXMqjjYIhS4abEVVKIUwe0/l/UhrZdKaL/W5eWZNlbTeEIiOXTcsBQ==",
+ "peerDependencies": {
+ "date-fns": "^3.0.0 || ^4.0.0"
+ }
+ },
+ "node_modules/debug": {
+ "version": "4.3.7",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.7.tgz",
+ "integrity": "sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==",
+ "dev": true,
+ "dependencies": {
+ "ms": "^2.1.3"
+ },
+ "engines": {
+ "node": ">=6.0"
+ },
+ "peerDependenciesMeta": {
+ "supports-color": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/decimal.js": {
+ "version": "10.4.3",
+ "resolved": "https://registry.npmjs.org/decimal.js/-/decimal.js-10.4.3.tgz",
+ "integrity": "sha512-VBBaLc1MgL5XpzgIP7ny5Z6Nx3UrRkIViUkPUdtl9aya5amy3De1gsUUSB1g3+3sExYNjCAsAznmukyxCb1GRA==",
+ "dev": true
+ },
+ "node_modules/decimal.js-light": {
+ "version": "2.5.1",
+ "resolved": "https://registry.npmjs.org/decimal.js-light/-/decimal.js-light-2.5.1.tgz",
+ "integrity": "sha512-qIMFpTMZmny+MMIitAB6D7iVPEorVw6YQRWkvarTkT4tBeSLLiHzcwj6q0MmYSFCiVpiqPJTJEYIrpcPzVEIvg=="
+ },
+ "node_modules/deep-eql": {
+ "version": "5.0.2",
+ "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-5.0.2.tgz",
+ "integrity": "sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==",
+ "dev": true,
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/deep-is": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz",
+ "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==",
+ "dev": true
+ },
+ "node_modules/define-data-property": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz",
+ "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==",
+ "dev": true,
+ "dependencies": {
+ "es-define-property": "^1.0.0",
+ "es-errors": "^1.3.0",
+ "gopd": "^1.0.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/define-lazy-prop": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz",
+ "integrity": "sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/define-properties": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.1.tgz",
+ "integrity": "sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==",
+ "dev": true,
+ "dependencies": {
+ "define-data-property": "^1.0.1",
+ "has-property-descriptors": "^1.0.0",
+ "object-keys": "^1.1.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/delayed-stream": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
+ "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.4.0"
+ }
+ },
+ "node_modules/dequal": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz",
+ "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==",
+ "dev": true,
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/detect-node-es": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/detect-node-es/-/detect-node-es-1.1.0.tgz",
+ "integrity": "sha512-ypdmJU/TbBby2Dxibuv7ZLW3Bs1QEmM7nHjEANfohJLvE0XVujisn1qPJcZxg+qDucsr+bP6fLD1rPS3AhJ7EQ=="
+ },
+ "node_modules/didyoumean": {
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/didyoumean/-/didyoumean-1.2.2.tgz",
+ "integrity": "sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw=="
+ },
+ "node_modules/dir-glob": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz",
+ "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==",
+ "dev": true,
+ "dependencies": {
+ "path-type": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/dlv": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/dlv/-/dlv-1.1.3.tgz",
+ "integrity": "sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA=="
+ },
+ "node_modules/doctrine": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz",
+ "integrity": "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==",
+ "dev": true,
+ "dependencies": {
+ "esutils": "^2.0.2"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/dom-accessibility-api": {
+ "version": "0.5.16",
+ "resolved": "https://registry.npmjs.org/dom-accessibility-api/-/dom-accessibility-api-0.5.16.tgz",
+ "integrity": "sha512-X7BJ2yElsnOJ30pZF4uIIDfBEVgF4XEBxL9Bxhy6dnrm5hkzqmsWHGTiHqRiITNhMyFLyAiWndIJP7Z1NTteDg==",
+ "dev": true
+ },
+ "node_modules/dom-helpers": {
+ "version": "5.2.1",
+ "resolved": "https://registry.npmjs.org/dom-helpers/-/dom-helpers-5.2.1.tgz",
+ "integrity": "sha512-nRCa7CK3VTrM2NmGkIy4cbK7IZlgBE/PYMn55rrXefr5xXDP0LdtfPnblFDoVdcAfslJ7or6iqAUnx0CCGIWQA==",
+ "dependencies": {
+ "@babel/runtime": "^7.8.7",
+ "csstype": "^3.0.2"
+ }
+ },
+ "node_modules/eastasianwidth": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz",
+ "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA=="
+ },
+ "node_modules/electron-to-chromium": {
+ "version": "1.5.50",
+ "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.50.tgz",
+ "integrity": "sha512-eMVObiUQ2LdgeO1F/ySTXsvqvxb6ZH2zPGaMYsWzRDdOddUa77tdmI0ltg+L16UpbWdhPmuF3wIQYyQq65WfZw==",
+ "dev": true
+ },
+ "node_modules/emoji-regex": {
+ "version": "9.2.2",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz",
+ "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg=="
+ },
+ "node_modules/entities": {
+ "version": "4.5.0",
+ "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz",
+ "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.12"
+ },
+ "funding": {
+ "url": "https://github.com/fb55/entities?sponsor=1"
+ }
+ },
+ "node_modules/es-abstract": {
+ "version": "1.23.3",
+ "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.23.3.tgz",
+ "integrity": "sha512-e+HfNH61Bj1X9/jLc5v1owaLYuHdeHHSQlkhCBiTK8rBvKaULl/beGMxwrMXjpYrv4pz22BlY570vVePA2ho4A==",
+ "dev": true,
+ "dependencies": {
+ "array-buffer-byte-length": "^1.0.1",
+ "arraybuffer.prototype.slice": "^1.0.3",
+ "available-typed-arrays": "^1.0.7",
+ "call-bind": "^1.0.7",
+ "data-view-buffer": "^1.0.1",
+ "data-view-byte-length": "^1.0.1",
+ "data-view-byte-offset": "^1.0.0",
+ "es-define-property": "^1.0.0",
+ "es-errors": "^1.3.0",
+ "es-object-atoms": "^1.0.0",
+ "es-set-tostringtag": "^2.0.3",
+ "es-to-primitive": "^1.2.1",
+ "function.prototype.name": "^1.1.6",
+ "get-intrinsic": "^1.2.4",
+ "get-symbol-description": "^1.0.2",
+ "globalthis": "^1.0.3",
+ "gopd": "^1.0.1",
+ "has-property-descriptors": "^1.0.2",
+ "has-proto": "^1.0.3",
+ "has-symbols": "^1.0.3",
+ "hasown": "^2.0.2",
+ "internal-slot": "^1.0.7",
+ "is-array-buffer": "^3.0.4",
+ "is-callable": "^1.2.7",
+ "is-data-view": "^1.0.1",
+ "is-negative-zero": "^2.0.3",
+ "is-regex": "^1.1.4",
+ "is-shared-array-buffer": "^1.0.3",
+ "is-string": "^1.0.7",
+ "is-typed-array": "^1.1.13",
+ "is-weakref": "^1.0.2",
+ "object-inspect": "^1.13.1",
+ "object-keys": "^1.1.1",
+ "object.assign": "^4.1.5",
+ "regexp.prototype.flags": "^1.5.2",
+ "safe-array-concat": "^1.1.2",
+ "safe-regex-test": "^1.0.3",
+ "string.prototype.trim": "^1.2.9",
+ "string.prototype.trimend": "^1.0.8",
+ "string.prototype.trimstart": "^1.0.8",
+ "typed-array-buffer": "^1.0.2",
+ "typed-array-byte-length": "^1.0.1",
+ "typed-array-byte-offset": "^1.0.2",
+ "typed-array-length": "^1.0.6",
+ "unbox-primitive": "^1.0.2",
+ "which-typed-array": "^1.1.15"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/es-define-property": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz",
+ "integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==",
+ "dev": true,
+ "dependencies": {
+ "get-intrinsic": "^1.2.4"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-errors": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz",
+ "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-iterator-helpers": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/es-iterator-helpers/-/es-iterator-helpers-1.1.0.tgz",
+ "integrity": "sha512-/SurEfycdyssORP/E+bj4sEu1CWw4EmLDsHynHwSXQ7utgbrMRWW195pTrCjFgFCddf/UkYm3oqKPRq5i8bJbw==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.7",
+ "define-properties": "^1.2.1",
+ "es-abstract": "^1.23.3",
+ "es-errors": "^1.3.0",
+ "es-set-tostringtag": "^2.0.3",
+ "function-bind": "^1.1.2",
+ "get-intrinsic": "^1.2.4",
+ "globalthis": "^1.0.4",
+ "has-property-descriptors": "^1.0.2",
+ "has-proto": "^1.0.3",
+ "has-symbols": "^1.0.3",
+ "internal-slot": "^1.0.7",
+ "iterator.prototype": "^1.1.3",
+ "safe-array-concat": "^1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-object-atoms": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.0.0.tgz",
+ "integrity": "sha512-MZ4iQ6JwHOBQjahnjwaC1ZtIBH+2ohjamzAO3oaHcXYup7qxjF2fixyH+Q71voWHeOkI2q/TnJao/KfXYIZWbw==",
+ "dev": true,
+ "dependencies": {
+ "es-errors": "^1.3.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-set-tostringtag": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.0.3.tgz",
+ "integrity": "sha512-3T8uNMC3OQTHkFUsFq8r/BwAXLHvU/9O9mE0fBc/MY5iq/8H7ncvO947LmYA6ldWw9Uh8Yhf25zu6n7nML5QWQ==",
+ "dev": true,
+ "dependencies": {
+ "get-intrinsic": "^1.2.4",
+ "has-tostringtag": "^1.0.2",
+ "hasown": "^2.0.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-shim-unscopables": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/es-shim-unscopables/-/es-shim-unscopables-1.0.2.tgz",
+ "integrity": "sha512-J3yBRXCzDu4ULnQwxyToo/OjdMx6akgVC7K6few0a7F/0wLtmKKN7I73AH5T2836UuXRqN7Qg+IIUw/+YJksRw==",
+ "dev": true,
+ "dependencies": {
+ "hasown": "^2.0.0"
+ }
+ },
+ "node_modules/es-to-primitive": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.2.1.tgz",
+ "integrity": "sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==",
+ "dev": true,
+ "dependencies": {
+ "is-callable": "^1.1.4",
+ "is-date-object": "^1.0.1",
+ "is-symbol": "^1.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/esbuild": {
+ "version": "0.23.1",
+ "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.23.1.tgz",
+ "integrity": "sha512-VVNz/9Sa0bs5SELtn3f7qhJCDPCF5oMEl5cO9/SSinpE9hbPVvxbd572HH5AKiP7WD8INO53GgfDDhRjkylHEg==",
+ "devOptional": true,
+ "hasInstallScript": true,
+ "bin": {
+ "esbuild": "bin/esbuild"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "optionalDependencies": {
+ "@esbuild/aix-ppc64": "0.23.1",
+ "@esbuild/android-arm": "0.23.1",
+ "@esbuild/android-arm64": "0.23.1",
+ "@esbuild/android-x64": "0.23.1",
+ "@esbuild/darwin-arm64": "0.23.1",
+ "@esbuild/darwin-x64": "0.23.1",
+ "@esbuild/freebsd-arm64": "0.23.1",
+ "@esbuild/freebsd-x64": "0.23.1",
+ "@esbuild/linux-arm": "0.23.1",
+ "@esbuild/linux-arm64": "0.23.1",
+ "@esbuild/linux-ia32": "0.23.1",
+ "@esbuild/linux-loong64": "0.23.1",
+ "@esbuild/linux-mips64el": "0.23.1",
+ "@esbuild/linux-ppc64": "0.23.1",
+ "@esbuild/linux-riscv64": "0.23.1",
+ "@esbuild/linux-s390x": "0.23.1",
+ "@esbuild/linux-x64": "0.23.1",
+ "@esbuild/netbsd-x64": "0.23.1",
+ "@esbuild/openbsd-arm64": "0.23.1",
+ "@esbuild/openbsd-x64": "0.23.1",
+ "@esbuild/sunos-x64": "0.23.1",
+ "@esbuild/win32-arm64": "0.23.1",
+ "@esbuild/win32-ia32": "0.23.1",
+ "@esbuild/win32-x64": "0.23.1"
+ }
+ },
+ "node_modules/esbuild-register": {
+ "version": "3.6.0",
+ "resolved": "https://registry.npmjs.org/esbuild-register/-/esbuild-register-3.6.0.tgz",
+ "integrity": "sha512-H2/S7Pm8a9CL1uhp9OvjwrBh5Pvx0H8qVOxNu8Wed9Y7qv56MPtq+GGM8RJpq6glYJn9Wspr8uw7l55uyinNeg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "debug": "^4.3.4"
+ },
+ "peerDependencies": {
+ "esbuild": ">=0.12 <1"
+ }
+ },
+ "node_modules/escalade": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz",
+ "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==",
+ "dev": true,
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/escape-string-regexp": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz",
+ "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==",
+ "dev": true,
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/eslint": {
+ "version": "9.13.0",
+ "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.13.0.tgz",
+ "integrity": "sha512-EYZK6SX6zjFHST/HRytOdA/zE72Cq/bfw45LSyuwrdvcclb/gqV8RRQxywOBEWO2+WDpva6UZa4CcDeJKzUCFA==",
+ "dev": true,
+ "dependencies": {
+ "@eslint-community/eslint-utils": "^4.2.0",
+ "@eslint-community/regexpp": "^4.11.0",
+ "@eslint/config-array": "^0.18.0",
+ "@eslint/core": "^0.7.0",
+ "@eslint/eslintrc": "^3.1.0",
+ "@eslint/js": "9.13.0",
+ "@eslint/plugin-kit": "^0.2.0",
+ "@humanfs/node": "^0.16.5",
+ "@humanwhocodes/module-importer": "^1.0.1",
+ "@humanwhocodes/retry": "^0.3.1",
+ "@types/estree": "^1.0.6",
+ "@types/json-schema": "^7.0.15",
+ "ajv": "^6.12.4",
+ "chalk": "^4.0.0",
+ "cross-spawn": "^7.0.2",
+ "debug": "^4.3.2",
+ "escape-string-regexp": "^4.0.0",
+ "eslint-scope": "^8.1.0",
+ "eslint-visitor-keys": "^4.1.0",
+ "espree": "^10.2.0",
+ "esquery": "^1.5.0",
+ "esutils": "^2.0.2",
+ "fast-deep-equal": "^3.1.3",
+ "file-entry-cache": "^8.0.0",
+ "find-up": "^5.0.0",
+ "glob-parent": "^6.0.2",
+ "ignore": "^5.2.0",
+ "imurmurhash": "^0.1.4",
+ "is-glob": "^4.0.0",
+ "json-stable-stringify-without-jsonify": "^1.0.1",
+ "lodash.merge": "^4.6.2",
+ "minimatch": "^3.1.2",
+ "natural-compare": "^1.4.0",
+ "optionator": "^0.9.3",
+ "text-table": "^0.2.0"
+ },
+ "bin": {
+ "eslint": "bin/eslint.js"
+ },
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ },
+ "funding": {
+ "url": "https://eslint.org/donate"
+ },
+ "peerDependencies": {
+ "jiti": "*"
+ },
+ "peerDependenciesMeta": {
+ "jiti": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/eslint-plugin-jest-dom": {
+ "version": "5.4.0",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-jest-dom/-/eslint-plugin-jest-dom-5.4.0.tgz",
+ "integrity": "sha512-yBqvFsnpS5Sybjoq61cJiUsenRkC9K32hYQBFS9doBR7nbQZZ5FyO+X7MlmfM1C48Ejx/qTuOCgukDUNyzKZ7A==",
+ "dev": true,
+ "dependencies": {
+ "@babel/runtime": "^7.16.3",
+ "requireindex": "^1.2.0"
+ },
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0",
+ "npm": ">=6",
+ "yarn": ">=1"
+ },
+ "peerDependencies": {
+ "@testing-library/dom": "^8.0.0 || ^9.0.0 || ^10.0.0",
+ "eslint": "^6.8.0 || ^7.0.0 || ^8.0.0 || ^9.0.0"
+ },
+ "peerDependenciesMeta": {
+ "@testing-library/dom": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/eslint-plugin-react": {
+ "version": "7.37.2",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.37.2.tgz",
+ "integrity": "sha512-EsTAnj9fLVr/GZleBLFbj/sSuXeWmp1eXIN60ceYnZveqEaUCyW4X+Vh4WTdUhCkW4xutXYqTXCUSyqD4rB75w==",
+ "dev": true,
+ "dependencies": {
+ "array-includes": "^3.1.8",
+ "array.prototype.findlast": "^1.2.5",
+ "array.prototype.flatmap": "^1.3.2",
+ "array.prototype.tosorted": "^1.1.4",
+ "doctrine": "^2.1.0",
+ "es-iterator-helpers": "^1.1.0",
+ "estraverse": "^5.3.0",
+ "hasown": "^2.0.2",
+ "jsx-ast-utils": "^2.4.1 || ^3.0.0",
+ "minimatch": "^3.1.2",
+ "object.entries": "^1.1.8",
+ "object.fromentries": "^2.0.8",
+ "object.values": "^1.2.0",
+ "prop-types": "^15.8.1",
+ "resolve": "^2.0.0-next.5",
+ "semver": "^6.3.1",
+ "string.prototype.matchall": "^4.0.11",
+ "string.prototype.repeat": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=4"
+ },
+ "peerDependencies": {
+ "eslint": "^3 || ^4 || ^5 || ^6 || ^7 || ^8 || ^9.7"
+ }
+ },
+ "node_modules/eslint-plugin-react-hooks": {
+ "version": "5.1.0-rc-fb9a90fa48-20240614",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-5.1.0-rc-fb9a90fa48-20240614.tgz",
+ "integrity": "sha512-xsiRwaDNF5wWNC4ZHLut+x/YcAxksUd9Rizt7LaEn3bV8VyYRpXnRJQlLOfYaVy9esk4DFP4zPPnoNVjq5Gc0w==",
+ "dev": true,
+ "engines": {
+ "node": ">=10"
+ },
+ "peerDependencies": {
+ "eslint": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0 || ^9.0.0"
+ }
+ },
+ "node_modules/eslint-plugin-react-refresh": {
+ "version": "0.4.14",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-react-refresh/-/eslint-plugin-react-refresh-0.4.14.tgz",
+ "integrity": "sha512-aXvzCTK7ZBv1e7fahFuR3Z/fyQQSIQ711yPgYRj+Oj64tyTgO4iQIDmYXDBqvSWQ/FA4OSCsXOStlF+noU0/NA==",
+ "dev": true,
+ "peerDependencies": {
+ "eslint": ">=7"
+ }
+ },
+ "node_modules/eslint-plugin-storybook": {
+ "version": "0.11.0",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-storybook/-/eslint-plugin-storybook-0.11.0.tgz",
+ "integrity": "sha512-MvPJgF+ORwgK04a1CY5itO4pwdAOFIRqczlNEHL62+4Ocvj1d61GWRqIdeX1BNCKno6fdPC6TksUHCZMGsq26g==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@storybook/csf": "^0.1.11",
+ "@typescript-eslint/utils": "^8.8.1",
+ "ts-dedent": "^2.2.0"
+ },
+ "engines": {
+ "node": ">= 18"
+ },
+ "peerDependencies": {
+ "eslint": ">=6"
+ }
+ },
+ "node_modules/eslint-plugin-testing-library": {
+ "version": "6.4.0",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-testing-library/-/eslint-plugin-testing-library-6.4.0.tgz",
+ "integrity": "sha512-yeWF+YgCgvNyPNI9UKnG0FjeE2sk93N/3lsKqcmR8dSfeXJwFT5irnWo7NjLf152HkRzfoFjh3LsBUrhvFz4eA==",
+ "dev": true,
+ "dependencies": {
+ "@typescript-eslint/utils": "^5.62.0"
+ },
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0",
+ "npm": ">=6"
+ },
+ "peerDependencies": {
+ "eslint": "^7.5.0 || ^8.0.0 || ^9.0.0"
+ }
+ },
+ "node_modules/eslint-plugin-testing-library/node_modules/@typescript-eslint/scope-manager": {
+ "version": "5.62.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.62.0.tgz",
+ "integrity": "sha512-VXuvVvZeQCQb5Zgf4HAxc04q5j+WrNAtNh9OwCsCgpKqESMTu3tF/jhZ3xG6T4NZwWl65Bg8KuS2uEvhSfLl0w==",
+ "dev": true,
+ "dependencies": {
+ "@typescript-eslint/types": "5.62.0",
+ "@typescript-eslint/visitor-keys": "5.62.0"
+ },
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ }
+ },
+ "node_modules/eslint-plugin-testing-library/node_modules/@typescript-eslint/types": {
+ "version": "5.62.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.62.0.tgz",
+ "integrity": "sha512-87NVngcbVXUahrRTqIK27gD2t5Cu1yuCXxbLcFtCzZGlfyVWWh8mLHkoxzjsB6DDNnvdL+fW8MiwPEJyGJQDgQ==",
+ "dev": true,
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ }
+ },
+ "node_modules/eslint-plugin-testing-library/node_modules/@typescript-eslint/typescript-estree": {
+ "version": "5.62.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.62.0.tgz",
+ "integrity": "sha512-CmcQ6uY7b9y694lKdRB8FEel7JbU/40iSAPomu++SjLMntB+2Leay2LO6i8VnJk58MtE9/nQSFIH6jpyRWyYzA==",
+ "dev": true,
+ "dependencies": {
+ "@typescript-eslint/types": "5.62.0",
+ "@typescript-eslint/visitor-keys": "5.62.0",
+ "debug": "^4.3.4",
+ "globby": "^11.1.0",
+ "is-glob": "^4.0.3",
+ "semver": "^7.3.7",
+ "tsutils": "^3.21.0"
+ },
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ },
+ "peerDependenciesMeta": {
+ "typescript": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/eslint-plugin-testing-library/node_modules/@typescript-eslint/utils": {
+ "version": "5.62.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-5.62.0.tgz",
+ "integrity": "sha512-n8oxjeb5aIbPFEtmQxQYOLI0i9n5ySBEY/ZEHHZqKQSFnxio1rv6dthascc9dLuwrL0RC5mPCxB7vnAVGAYWAQ==",
+ "dev": true,
+ "dependencies": {
+ "@eslint-community/eslint-utils": "^4.2.0",
+ "@types/json-schema": "^7.0.9",
+ "@types/semver": "^7.3.12",
+ "@typescript-eslint/scope-manager": "5.62.0",
+ "@typescript-eslint/types": "5.62.0",
+ "@typescript-eslint/typescript-estree": "5.62.0",
+ "eslint-scope": "^5.1.1",
+ "semver": "^7.3.7"
+ },
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ },
+ "peerDependencies": {
+ "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0"
+ }
+ },
+ "node_modules/eslint-plugin-testing-library/node_modules/@typescript-eslint/visitor-keys": {
+ "version": "5.62.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.62.0.tgz",
+ "integrity": "sha512-07ny+LHRzQXepkGg6w0mFY41fVUNBrL2Roj/++7V1txKugfjm/Ci/qSND03r2RhlJhJYMcTn9AhhSSqQp0Ysyw==",
+ "dev": true,
+ "dependencies": {
+ "@typescript-eslint/types": "5.62.0",
+ "eslint-visitor-keys": "^3.3.0"
+ },
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ }
+ },
+ "node_modules/eslint-plugin-testing-library/node_modules/eslint-scope": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz",
+ "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==",
+ "dev": true,
+ "dependencies": {
+ "esrecurse": "^4.3.0",
+ "estraverse": "^4.1.1"
+ },
+ "engines": {
+ "node": ">=8.0.0"
+ }
+ },
+ "node_modules/eslint-plugin-testing-library/node_modules/eslint-visitor-keys": {
+ "version": "3.4.3",
+ "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz",
+ "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==",
+ "dev": true,
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/eslint"
+ }
+ },
+ "node_modules/eslint-plugin-testing-library/node_modules/estraverse": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz",
+ "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==",
+ "dev": true,
+ "engines": {
+ "node": ">=4.0"
+ }
+ },
+ "node_modules/eslint-plugin-testing-library/node_modules/semver": {
+ "version": "7.6.3",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.3.tgz",
+ "integrity": "sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==",
+ "dev": true,
+ "bin": {
+ "semver": "bin/semver.js"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/eslint-plugin-unused-imports": {
+ "version": "4.1.4",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-unused-imports/-/eslint-plugin-unused-imports-4.1.4.tgz",
+ "integrity": "sha512-YptD6IzQjDardkl0POxnnRBhU1OEePMV0nd6siHaRBbd+lyh6NAhFEobiznKU7kTsSsDeSD62Pe7kAM1b7dAZQ==",
+ "dev": true,
+ "peerDependencies": {
+ "@typescript-eslint/eslint-plugin": "^8.0.0-0 || ^7.0.0 || ^6.0.0 || ^5.0.0",
+ "eslint": "^9.0.0 || ^8.0.0"
+ },
+ "peerDependenciesMeta": {
+ "@typescript-eslint/eslint-plugin": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/eslint-scope": {
+ "version": "8.2.0",
+ "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.2.0.tgz",
+ "integrity": "sha512-PHlWUfG6lvPc3yvP5A4PNyBL1W8fkDUccmI21JUu/+GKZBoH/W5u6usENXUrWFRsyoW5ACUjFGgAFQp5gUlb/A==",
+ "dev": true,
+ "dependencies": {
+ "esrecurse": "^4.3.0",
+ "estraverse": "^5.2.0"
+ },
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/eslint"
+ }
+ },
+ "node_modules/eslint-visitor-keys": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.0.tgz",
+ "integrity": "sha512-UyLnSehNt62FFhSwjZlHmeokpRK59rcz29j+F1/aDgbkbRTk7wIc9XzdoasMUbRNKDM0qQt/+BJ4BrpFeABemw==",
+ "dev": true,
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/eslint"
+ }
+ },
+ "node_modules/eslint/node_modules/glob-parent": {
+ "version": "6.0.2",
+ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz",
+ "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==",
+ "dev": true,
+ "dependencies": {
+ "is-glob": "^4.0.3"
+ },
+ "engines": {
+ "node": ">=10.13.0"
+ }
+ },
+ "node_modules/espree": {
+ "version": "10.3.0",
+ "resolved": "https://registry.npmjs.org/espree/-/espree-10.3.0.tgz",
+ "integrity": "sha512-0QYC8b24HWY8zjRnDTL6RiHfDbAWn63qb4LMj1Z4b076A4une81+z03Kg7l7mn/48PUTqoLptSXez8oknU8Clg==",
+ "dev": true,
+ "dependencies": {
+ "acorn": "^8.14.0",
+ "acorn-jsx": "^5.3.2",
+ "eslint-visitor-keys": "^4.2.0"
+ },
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/eslint"
+ }
+ },
+ "node_modules/esprima": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz",
+ "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==",
+ "dev": true,
+ "license": "BSD-2-Clause",
+ "bin": {
+ "esparse": "bin/esparse.js",
+ "esvalidate": "bin/esvalidate.js"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/esquery": {
+ "version": "1.6.0",
+ "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.6.0.tgz",
+ "integrity": "sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg==",
+ "dev": true,
+ "dependencies": {
+ "estraverse": "^5.1.0"
+ },
+ "engines": {
+ "node": ">=0.10"
+ }
+ },
+ "node_modules/esrecurse": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz",
+ "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==",
+ "dev": true,
+ "dependencies": {
+ "estraverse": "^5.2.0"
+ },
+ "engines": {
+ "node": ">=4.0"
+ }
+ },
+ "node_modules/estraverse": {
+ "version": "5.3.0",
+ "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz",
+ "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==",
+ "dev": true,
+ "engines": {
+ "node": ">=4.0"
+ }
+ },
+ "node_modules/estree-walker": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz",
+ "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==",
+ "dev": true,
+ "dependencies": {
+ "@types/estree": "^1.0.0"
+ }
+ },
+ "node_modules/esutils": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz",
+ "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/eventemitter3": {
+ "version": "4.0.7",
+ "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz",
+ "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw=="
+ },
+ "node_modules/expect-type": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/expect-type/-/expect-type-1.1.0.tgz",
+ "integrity": "sha512-bFi65yM+xZgk+u/KRIpekdSYkTB5W1pEf0Lt8Q8Msh7b+eQ7LXVtIB1Bkm4fvclDEL1b2CZkMhv2mOeF8tMdkA==",
+ "dev": true,
+ "engines": {
+ "node": ">=12.0.0"
+ }
+ },
+ "node_modules/fast-deep-equal": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz",
+ "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==",
+ "dev": true
+ },
+ "node_modules/fast-equals": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/fast-equals/-/fast-equals-5.0.1.tgz",
+ "integrity": "sha512-WF1Wi8PwwSY7/6Kx0vKXtw8RwuSGoM1bvDaJbu7MxDlR1vovZjIAKrnzyrThgAjm6JDTu0fVgWXDlMGspodfoQ==",
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/fast-glob": {
+ "version": "3.3.2",
+ "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.2.tgz",
+ "integrity": "sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==",
+ "dependencies": {
+ "@nodelib/fs.stat": "^2.0.2",
+ "@nodelib/fs.walk": "^1.2.3",
+ "glob-parent": "^5.1.2",
+ "merge2": "^1.3.0",
+ "micromatch": "^4.0.4"
+ },
+ "engines": {
+ "node": ">=8.6.0"
+ }
+ },
+ "node_modules/fast-json-stable-stringify": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz",
+ "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==",
+ "dev": true
+ },
+ "node_modules/fast-levenshtein": {
+ "version": "2.0.6",
+ "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz",
+ "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==",
+ "dev": true
+ },
+ "node_modules/fastq": {
+ "version": "1.17.1",
+ "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.17.1.tgz",
+ "integrity": "sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==",
+ "dependencies": {
+ "reusify": "^1.0.4"
+ }
+ },
+ "node_modules/file-entry-cache": {
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-8.0.0.tgz",
+ "integrity": "sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==",
+ "dev": true,
+ "dependencies": {
+ "flat-cache": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=16.0.0"
+ }
+ },
+ "node_modules/fill-range": {
+ "version": "7.1.1",
+ "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz",
+ "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==",
+ "dependencies": {
+ "to-regex-range": "^5.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/find-up": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz",
+ "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==",
+ "dev": true,
+ "dependencies": {
+ "locate-path": "^6.0.0",
+ "path-exists": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/flat-cache": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-4.0.1.tgz",
+ "integrity": "sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw==",
+ "dev": true,
+ "dependencies": {
+ "flatted": "^3.2.9",
+ "keyv": "^4.5.4"
+ },
+ "engines": {
+ "node": ">=16"
+ }
+ },
+ "node_modules/flatted": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.1.tgz",
+ "integrity": "sha512-X8cqMLLie7KsNUDSdzeN8FYK9rEt4Dt67OsG/DNGnYTSDBG4uFAJFBnUeiV+zCVAvwFy56IjM9sH51jVaEhNxw==",
+ "dev": true
+ },
+ "node_modules/for-each": {
+ "version": "0.3.3",
+ "resolved": "https://registry.npmjs.org/for-each/-/for-each-0.3.3.tgz",
+ "integrity": "sha512-jqYfLp7mo9vIyQf8ykW2v7A+2N4QjeCeI5+Dz9XraiO1ign81wjiH7Fb9vSOWvQfNtmSa4H2RoQTrrXivdUZmw==",
+ "dev": true,
+ "dependencies": {
+ "is-callable": "^1.1.3"
+ }
+ },
+ "node_modules/foreground-child": {
+ "version": "3.3.0",
+ "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.3.0.tgz",
+ "integrity": "sha512-Ld2g8rrAyMYFXBhEqMz8ZAHBi4J4uS1i/CxGMDnjyFWddMXLVcDp051DZfu+t7+ab7Wv6SMqpWmyFIj5UbfFvg==",
+ "dependencies": {
+ "cross-spawn": "^7.0.0",
+ "signal-exit": "^4.0.1"
+ },
+ "engines": {
+ "node": ">=14"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
+ }
+ },
+ "node_modules/form-data": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.1.tgz",
+ "integrity": "sha512-tzN8e4TX8+kkxGPK8D5u0FNmjPUjw3lwC9lSLxxoB/+GtsJG91CO8bSWy73APlgAZzZbXEYZJuxjkHH2w+Ezhw==",
+ "dev": true,
+ "dependencies": {
+ "asynckit": "^0.4.0",
+ "combined-stream": "^1.0.8",
+ "mime-types": "^2.1.12"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/fraction.js": {
+ "version": "4.3.7",
+ "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.3.7.tgz",
+ "integrity": "sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew==",
+ "dev": true,
+ "engines": {
+ "node": "*"
+ },
+ "funding": {
+ "type": "patreon",
+ "url": "https://github.com/sponsors/rawify"
+ }
+ },
+ "node_modules/fs.realpath": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
+ "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/fsevents": {
+ "version": "2.3.3",
+ "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz",
+ "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==",
+ "hasInstallScript": true,
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": "^8.16.0 || ^10.6.0 || >=11.0.0"
+ }
+ },
+ "node_modules/function-bind": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz",
+ "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==",
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/function.prototype.name": {
+ "version": "1.1.6",
+ "resolved": "https://registry.npmjs.org/function.prototype.name/-/function.prototype.name-1.1.6.tgz",
+ "integrity": "sha512-Z5kx79swU5P27WEayXM1tBi5Ze/lbIyiNgU3qyXUOf9b2rgXYyF9Dy9Cx+IQv/Lc8WCG6L82zwUPpSS9hGehIg==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.2.0",
+ "es-abstract": "^1.22.1",
+ "functions-have-names": "^1.2.3"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/functions-have-names": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/functions-have-names/-/functions-have-names-1.2.3.tgz",
+ "integrity": "sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==",
+ "dev": true,
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/gensync": {
+ "version": "1.0.0-beta.2",
+ "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz",
+ "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==",
+ "dev": true,
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/get-caller-file": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz",
+ "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==",
+ "dev": true,
+ "engines": {
+ "node": "6.* || 8.* || >= 10.*"
+ }
+ },
+ "node_modules/get-intrinsic": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz",
+ "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==",
+ "dev": true,
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "function-bind": "^1.1.2",
+ "has-proto": "^1.0.1",
+ "has-symbols": "^1.0.3",
+ "hasown": "^2.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/get-nonce": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/get-nonce/-/get-nonce-1.0.1.tgz",
+ "integrity": "sha512-FJhYRoDaiatfEkUK8HKlicmu/3SGFD51q3itKDGoSTysQJBnfOcxU5GxnhE1E6soB76MbT0MBtnKJuXyAx+96Q==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/get-symbol-description": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/get-symbol-description/-/get-symbol-description-1.0.2.tgz",
+ "integrity": "sha512-g0QYk1dZBxGwk+Ngc+ltRH2IBp2f7zBkBMBJZCDerh6EhlhSR6+9irMCuT/09zD6qkarHUSn529sK/yL4S27mg==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.5",
+ "es-errors": "^1.3.0",
+ "get-intrinsic": "^1.2.4"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/get-tsconfig": {
+ "version": "4.8.1",
+ "resolved": "https://registry.npmjs.org/get-tsconfig/-/get-tsconfig-4.8.1.tgz",
+ "integrity": "sha512-k9PN+cFBmaLWtVz29SkUoqU5O0slLuHJXt/2P+tMVFT+phsSGXGkp9t3rQIqdz0e+06EHNGs3oM6ZX1s2zHxRg==",
+ "devOptional": true,
+ "dependencies": {
+ "resolve-pkg-maps": "^1.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/privatenumber/get-tsconfig?sponsor=1"
+ }
+ },
+ "node_modules/glob": {
+ "version": "10.4.5",
+ "resolved": "https://registry.npmjs.org/glob/-/glob-10.4.5.tgz",
+ "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==",
+ "dependencies": {
+ "foreground-child": "^3.1.0",
+ "jackspeak": "^3.1.2",
+ "minimatch": "^9.0.4",
+ "minipass": "^7.1.2",
+ "package-json-from-dist": "^1.0.0",
+ "path-scurry": "^1.11.1"
+ },
+ "bin": {
+ "glob": "dist/esm/bin.mjs"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
+ }
+ },
+ "node_modules/glob-parent": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
+ "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
+ "dependencies": {
+ "is-glob": "^4.0.1"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/glob/node_modules/brace-expansion": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
+ "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
+ "dependencies": {
+ "balanced-match": "^1.0.0"
+ }
+ },
+ "node_modules/glob/node_modules/minimatch": {
+ "version": "9.0.5",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz",
+ "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==",
+ "dependencies": {
+ "brace-expansion": "^2.0.1"
+ },
+ "engines": {
+ "node": ">=16 || 14 >=14.17"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
+ }
+ },
+ "node_modules/globals": {
+ "version": "15.11.0",
+ "resolved": "https://registry.npmjs.org/globals/-/globals-15.11.0.tgz",
+ "integrity": "sha512-yeyNSjdbyVaWurlwCpcA6XNBrHTMIeDdj0/hnvX/OLJ9ekOXYbLsLinH/MucQyGvNnXhidTdNhTtJaffL2sMfw==",
+ "dev": true,
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/globalthis": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/globalthis/-/globalthis-1.0.4.tgz",
+ "integrity": "sha512-DpLKbNU4WylpxJykQujfCcwYWiV/Jhm50Goo0wrVILAv5jOr9d+H+UR3PhSCD2rCCEIg0uc+G+muBTwD54JhDQ==",
+ "dev": true,
+ "dependencies": {
+ "define-properties": "^1.2.1",
+ "gopd": "^1.0.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/globby": {
+ "version": "11.1.0",
+ "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz",
+ "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==",
+ "dev": true,
+ "dependencies": {
+ "array-union": "^2.1.0",
+ "dir-glob": "^3.0.1",
+ "fast-glob": "^3.2.9",
+ "ignore": "^5.2.0",
+ "merge2": "^1.4.1",
+ "slash": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/goober": {
+ "version": "2.1.16",
+ "resolved": "https://registry.npmjs.org/goober/-/goober-2.1.16.tgz",
+ "integrity": "sha512-erjk19y1U33+XAMe1VTvIONHYoSqE4iS7BYUZfHaqeohLmnC0FdxEh7rQU+6MZ4OajItzjZFSRtVANrQwNq6/g==",
+ "dev": true,
+ "peerDependencies": {
+ "csstype": "^3.0.10"
+ }
+ },
+ "node_modules/gopd": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz",
+ "integrity": "sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==",
+ "dev": true,
+ "dependencies": {
+ "get-intrinsic": "^1.1.3"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/graphemer": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz",
+ "integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==",
+ "dev": true
+ },
+ "node_modules/graphql": {
+ "version": "16.9.0",
+ "resolved": "https://registry.npmjs.org/graphql/-/graphql-16.9.0.tgz",
+ "integrity": "sha512-GGTKBX4SD7Wdb8mqeDLni2oaRGYQWjWHGKPQ24ZMnUtKfcsVoiv4uX8+LJr1K6U5VW2Lu1BwJnj7uiori0YtRw==",
+ "dev": true,
+ "engines": {
+ "node": "^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0"
+ }
+ },
+ "node_modules/has-bigints": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.0.2.tgz",
+ "integrity": "sha512-tSvCKtBr9lkF0Ex0aQiP9N+OpV4zi2r/Nee5VkRDbaqv35RLYMzbwQfFSZZH0kR+Rd6302UJZ2p/bJCEoR3VoQ==",
+ "dev": true,
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/has-flag": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
+ "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/has-property-descriptors": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz",
+ "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==",
+ "dev": true,
+ "dependencies": {
+ "es-define-property": "^1.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/has-proto": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.3.tgz",
+ "integrity": "sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/has-symbols": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz",
+ "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/has-tostringtag": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz",
+ "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==",
+ "dev": true,
+ "dependencies": {
+ "has-symbols": "^1.0.3"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/hasown": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz",
+ "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==",
+ "dependencies": {
+ "function-bind": "^1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/headers-polyfill": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/headers-polyfill/-/headers-polyfill-4.0.3.tgz",
+ "integrity": "sha512-IScLbePpkvO846sIwOtOTDjutRMWdXdJmXdMvk6gCBHxFO8d+QKOQedyZSxFTTFYRSmlgSTDtXqqq4pcenBXLQ==",
+ "dev": true
+ },
+ "node_modules/html-encoding-sniffer": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/html-encoding-sniffer/-/html-encoding-sniffer-4.0.0.tgz",
+ "integrity": "sha512-Y22oTqIU4uuPgEemfz7NDJz6OeKf12Lsu+QC+s3BVpda64lTiMYCyGwg5ki4vFxkMwQdeZDl2adZoqUgdFuTgQ==",
+ "dev": true,
+ "dependencies": {
+ "whatwg-encoding": "^3.1.1"
+ },
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/http-proxy-agent": {
+ "version": "7.0.2",
+ "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-7.0.2.tgz",
+ "integrity": "sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==",
+ "dev": true,
+ "dependencies": {
+ "agent-base": "^7.1.0",
+ "debug": "^4.3.4"
+ },
+ "engines": {
+ "node": ">= 14"
+ }
+ },
+ "node_modules/https-proxy-agent": {
+ "version": "7.0.5",
+ "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.5.tgz",
+ "integrity": "sha512-1e4Wqeblerz+tMKPIq2EMGiiWW1dIjZOksyHWSUm1rmuvw/how9hBHZ38lAGj5ID4Ik6EdkOw7NmWPy6LAwalw==",
+ "dev": true,
+ "dependencies": {
+ "agent-base": "^7.0.2",
+ "debug": "4"
+ },
+ "engines": {
+ "node": ">= 14"
+ }
+ },
+ "node_modules/husky": {
+ "version": "9.1.6",
+ "resolved": "https://registry.npmjs.org/husky/-/husky-9.1.6.tgz",
+ "integrity": "sha512-sqbjZKK7kf44hfdE94EoX8MZNk0n7HeW37O4YrVGCF4wzgQjp+akPAkfUK5LZ6KuR/6sqeAVuXHji+RzQgOn5A==",
+ "dev": true,
+ "bin": {
+ "husky": "bin.js"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/typicode"
+ }
+ },
+ "node_modules/iconv-lite": {
+ "version": "0.6.3",
+ "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz",
+ "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==",
+ "dev": true,
+ "dependencies": {
+ "safer-buffer": ">= 2.1.2 < 3.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/ignore": {
+ "version": "5.3.2",
+ "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz",
+ "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==",
+ "dev": true,
+ "engines": {
+ "node": ">= 4"
+ }
+ },
+ "node_modules/import-fresh": {
+ "version": "3.3.0",
+ "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz",
+ "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==",
+ "dev": true,
+ "dependencies": {
+ "parent-module": "^1.0.0",
+ "resolve-from": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/imurmurhash": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz",
+ "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.8.19"
+ }
+ },
+ "node_modules/indent-string": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz",
+ "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/inflight": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
+ "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==",
+ "deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "once": "^1.3.0",
+ "wrappy": "1"
+ }
+ },
+ "node_modules/inherits": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
+ "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/internal-slot": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.0.7.tgz",
+ "integrity": "sha512-NGnrKwXzSms2qUUih/ILZ5JBqNTSa1+ZmP6flaIp6KmSElgE9qdndzS3cqjrDovwFdmwsGsLdeFgB6suw+1e9g==",
+ "dev": true,
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "hasown": "^2.0.0",
+ "side-channel": "^1.0.4"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/internmap": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/internmap/-/internmap-2.0.3.tgz",
+ "integrity": "sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg==",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/invariant": {
+ "version": "2.2.4",
+ "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz",
+ "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==",
+ "dependencies": {
+ "loose-envify": "^1.0.0"
+ }
+ },
+ "node_modules/is-arguments": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/is-arguments/-/is-arguments-1.1.1.tgz",
+ "integrity": "sha512-8Q7EARjzEnKpt/PCD7e1cgUS0a6X8u5tdSiMqXhojOdoV9TsMsiO+9VLC5vAmO8N7/GmXn7yjR8qnA6bVAEzfA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "has-tostringtag": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-array-buffer": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/is-array-buffer/-/is-array-buffer-3.0.4.tgz",
+ "integrity": "sha512-wcjaerHw0ydZwfhiKbXJWLDY8A7yV7KhjQOpb83hGgGfId/aQa4TOvwyzn2PuswW2gPCYEL/nEAiSVpdOj1lXw==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "get-intrinsic": "^1.2.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-async-function": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/is-async-function/-/is-async-function-2.0.0.tgz",
+ "integrity": "sha512-Y1JXKrfykRJGdlDwdKlLpLyMIiWqWvuSd17TvZk68PLAOGOoF4Xyav1z0Xhoi+gCYjZVeC5SI+hYFOfvXmGRCA==",
+ "dev": true,
+ "dependencies": {
+ "has-tostringtag": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-bigint": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.0.4.tgz",
+ "integrity": "sha512-zB9CruMamjym81i2JZ3UMn54PKGsQzsJeo6xvN3HJJ4CAsQNB6iRutp2To77OfCNuoxspsIhzaPoO1zyCEhFOg==",
+ "dev": true,
+ "dependencies": {
+ "has-bigints": "^1.0.1"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-binary-path": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz",
+ "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==",
+ "dependencies": {
+ "binary-extensions": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/is-boolean-object": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.1.2.tgz",
+ "integrity": "sha512-gDYaKHJmnj4aWxyj6YHyXVpdQawtVLHU5cb+eztPGczf6cjuTdwve5ZIEfgXqH4e57An1D1AKf8CZ3kYrQRqYA==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "has-tostringtag": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-callable": {
+ "version": "1.2.7",
+ "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.7.tgz",
+ "integrity": "sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-core-module": {
+ "version": "2.15.1",
+ "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.15.1.tgz",
+ "integrity": "sha512-z0vtXSwucUJtANQWldhbtbt7BnL0vxiFjIdDLAatwhDYty2bad6s+rijD6Ri4YuYJubLzIJLUidCh09e1djEVQ==",
+ "dependencies": {
+ "hasown": "^2.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-data-view": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-data-view/-/is-data-view-1.0.1.tgz",
+ "integrity": "sha512-AHkaJrsUVW6wq6JS8y3JnM/GJF/9cf+k20+iDzlSaJrinEo5+7vRiteOSwBhHRiAyQATN1AmY4hwzxJKPmYf+w==",
+ "dev": true,
+ "dependencies": {
+ "is-typed-array": "^1.1.13"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-date-object": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.5.tgz",
+ "integrity": "sha512-9YQaSxsAiSwcvS33MBk3wTCVnWK+HhF8VZR2jRxehM16QcVOdHqPn4VPHmRK4lSr38n9JriurInLcP90xsYNfQ==",
+ "dev": true,
+ "dependencies": {
+ "has-tostringtag": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-docker": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz",
+ "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==",
+ "dev": true,
+ "license": "MIT",
+ "bin": {
+ "is-docker": "cli.js"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/is-extglob": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz",
+ "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-finalizationregistry": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-finalizationregistry/-/is-finalizationregistry-1.0.2.tgz",
+ "integrity": "sha512-0by5vtUJs8iFQb5TYUHHPudOR+qXYIMKtiUzvLIZITZUjknFmziyBJuLhVRc+Ds0dREFlskDNJKYIdIzu/9pfw==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.2"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-fullwidth-code-point": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
+ "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/is-generator-function": {
+ "version": "1.0.10",
+ "resolved": "https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.0.10.tgz",
+ "integrity": "sha512-jsEjy9l3yiXEQ+PsXdmBwEPcOxaXWLspKdplFUVI9vq1iZgIekeC0L167qeu86czQaxed3q/Uzuw0swL0irL8A==",
+ "dev": true,
+ "dependencies": {
+ "has-tostringtag": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-glob": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz",
+ "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==",
+ "dependencies": {
+ "is-extglob": "^2.1.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-map": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/is-map/-/is-map-2.0.3.tgz",
+ "integrity": "sha512-1Qed0/Hr2m+YqxnM09CjA2d/i6YZNfF6R2oRAOj36eUdS6qIV/huPJNSEpKbupewFs+ZsJlxsjjPbc0/afW6Lw==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-negative-zero": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.3.tgz",
+ "integrity": "sha512-5KoIu2Ngpyek75jXodFvnafB6DJgr3u8uuK0LEZJjrU19DrMD3EVERaR8sjz8CCGgpZvxPl9SuE1GMVPFHx1mw==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-node-process": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/is-node-process/-/is-node-process-1.2.0.tgz",
+ "integrity": "sha512-Vg4o6/fqPxIjtxgUH5QLJhwZ7gW5diGCVlXpuUfELC62CuxM1iHcRe51f2W1FDy04Ai4KJkagKjx3XaqyfRKXw==",
+ "dev": true
+ },
+ "node_modules/is-number": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
+ "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==",
+ "engines": {
+ "node": ">=0.12.0"
+ }
+ },
+ "node_modules/is-number-object": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.0.7.tgz",
+ "integrity": "sha512-k1U0IRzLMo7ZlYIfzRu23Oh6MiIFasgpb9X76eqfFZAqwH44UI4KTBvBYIZ1dSL9ZzChTB9ShHfLkR4pdW5krQ==",
+ "dev": true,
+ "dependencies": {
+ "has-tostringtag": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-potential-custom-element-name": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-potential-custom-element-name/-/is-potential-custom-element-name-1.0.1.tgz",
+ "integrity": "sha512-bCYeRA2rVibKZd+s2625gGnGF/t7DSqDs4dP7CrLA1m7jKWz6pps0LpYLJN8Q64HtmPKJ1hrN3nzPNKFEKOUiQ==",
+ "dev": true
+ },
+ "node_modules/is-regex": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.1.4.tgz",
+ "integrity": "sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "has-tostringtag": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-set": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/is-set/-/is-set-2.0.3.tgz",
+ "integrity": "sha512-iPAjerrse27/ygGLxw+EBR9agv9Y6uLeYVJMu+QNCoouJ1/1ri0mGrcWpfCqFZuzzx3WjtwxG098X+n4OuRkPg==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-shared-array-buffer": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/is-shared-array-buffer/-/is-shared-array-buffer-1.0.3.tgz",
+ "integrity": "sha512-nA2hv5XIhLR3uVzDDfCIknerhx8XUKnstuOERPNNIinXG7v9u+ohXF67vxm4TPTEPU6lm61ZkwP3c9PCB97rhg==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.7"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-string": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.0.7.tgz",
+ "integrity": "sha512-tE2UXzivje6ofPW7l23cjDOMa09gb7xlAqG6jG5ej6uPV32TlWP3NKPigtaGeHNu9fohccRYvIiZMfOOnOYUtg==",
+ "dev": true,
+ "dependencies": {
+ "has-tostringtag": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-symbol": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.4.tgz",
+ "integrity": "sha512-C/CPBqKWnvdcxqIARxyOh4v1UUEOCHpgDa0WYgpKDFMszcrPcffg5uhwSgPCLD2WWxmq6isisz87tzT01tuGhg==",
+ "dev": true,
+ "dependencies": {
+ "has-symbols": "^1.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-typed-array": {
+ "version": "1.1.13",
+ "resolved": "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.13.tgz",
+ "integrity": "sha512-uZ25/bUAlUY5fR4OKT4rZQEBrzQWYV9ZJYGGsUmEJ6thodVJ1HX64ePQ6Z0qPWP+m+Uq6e9UugrE38jeYsDSMw==",
+ "dev": true,
+ "dependencies": {
+ "which-typed-array": "^1.1.14"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-weakmap": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/is-weakmap/-/is-weakmap-2.0.2.tgz",
+ "integrity": "sha512-K5pXYOm9wqY1RgjpL3YTkF39tni1XajUIkawTLUo9EZEVUFga5gSQJF8nNS7ZwJQ02y+1YCNYcMh+HIf1ZqE+w==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-weakref": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-weakref/-/is-weakref-1.0.2.tgz",
+ "integrity": "sha512-qctsuLZmIQ0+vSSMfoVvyFe2+GSEvnmZ2ezTup1SBse9+twCCeial6EEi3Nc2KFcf6+qz2FBPnjXsk8xhKSaPQ==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.2"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-weakset": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/is-weakset/-/is-weakset-2.0.3.tgz",
+ "integrity": "sha512-LvIm3/KWzS9oRFHugab7d+M/GcBXuXX5xZkzPmN+NxihdQlZUQ4dWuSV1xR/sq6upL1TJEDrfBgRepHFdBtSNQ==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.7",
+ "get-intrinsic": "^1.2.4"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-wsl": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz",
+ "integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "is-docker": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/isarray": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz",
+ "integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==",
+ "dev": true
+ },
+ "node_modules/isexe": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
+ "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw=="
+ },
+ "node_modules/iterator.prototype": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/iterator.prototype/-/iterator.prototype-1.1.3.tgz",
+ "integrity": "sha512-FW5iMbeQ6rBGm/oKgzq2aW4KvAGpxPzYES8N4g4xNXUKpL1mclMvOe+76AcLDTvD+Ze+sOpVhgdAQEKF4L9iGQ==",
+ "dev": true,
+ "dependencies": {
+ "define-properties": "^1.2.1",
+ "get-intrinsic": "^1.2.1",
+ "has-symbols": "^1.0.3",
+ "reflect.getprototypeof": "^1.0.4",
+ "set-function-name": "^2.0.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/jackspeak": {
+ "version": "3.4.3",
+ "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-3.4.3.tgz",
+ "integrity": "sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==",
+ "dependencies": {
+ "@isaacs/cliui": "^8.0.2"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
+ },
+ "optionalDependencies": {
+ "@pkgjs/parseargs": "^0.11.0"
+ }
+ },
+ "node_modules/jiti": {
+ "version": "1.21.6",
+ "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.6.tgz",
+ "integrity": "sha512-2yTgeWTWzMWkHu6Jp9NKgePDaYHbntiwvYuuJLbbN9vl7DC9DvXKOB2BC3ZZ92D3cvV/aflH0osDfwpHepQ53w==",
+ "bin": {
+ "jiti": "bin/jiti.js"
+ }
+ },
+ "node_modules/js-tokens": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
+ "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ=="
+ },
+ "node_modules/js-yaml": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz",
+ "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==",
+ "dev": true,
+ "dependencies": {
+ "argparse": "^2.0.1"
+ },
+ "bin": {
+ "js-yaml": "bin/js-yaml.js"
+ }
+ },
+ "node_modules/jsdoc-type-pratt-parser": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/jsdoc-type-pratt-parser/-/jsdoc-type-pratt-parser-4.1.0.tgz",
+ "integrity": "sha512-Hicd6JK5Njt2QB6XYFS7ok9e37O8AYk3jTcppG4YVQnYjOemymvTcmc7OWsmq/Qqj5TdRFO5/x/tIPmBeRtGHg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=12.0.0"
+ }
+ },
+ "node_modules/jsdom": {
+ "version": "25.0.1",
+ "resolved": "https://registry.npmjs.org/jsdom/-/jsdom-25.0.1.tgz",
+ "integrity": "sha512-8i7LzZj7BF8uplX+ZyOlIz86V6TAsSs+np6m1kpW9u0JWi4z/1t+FzcK1aek+ybTnAC4KhBL4uXCNT0wcUIeCw==",
+ "dev": true,
+ "dependencies": {
+ "cssstyle": "^4.1.0",
+ "data-urls": "^5.0.0",
+ "decimal.js": "^10.4.3",
+ "form-data": "^4.0.0",
+ "html-encoding-sniffer": "^4.0.0",
+ "http-proxy-agent": "^7.0.2",
+ "https-proxy-agent": "^7.0.5",
+ "is-potential-custom-element-name": "^1.0.1",
+ "nwsapi": "^2.2.12",
+ "parse5": "^7.1.2",
+ "rrweb-cssom": "^0.7.1",
+ "saxes": "^6.0.0",
+ "symbol-tree": "^3.2.4",
+ "tough-cookie": "^5.0.0",
+ "w3c-xmlserializer": "^5.0.0",
+ "webidl-conversions": "^7.0.0",
+ "whatwg-encoding": "^3.1.1",
+ "whatwg-mimetype": "^4.0.0",
+ "whatwg-url": "^14.0.0",
+ "ws": "^8.18.0",
+ "xml-name-validator": "^5.0.0"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "peerDependencies": {
+ "canvas": "^2.11.2"
+ },
+ "peerDependenciesMeta": {
+ "canvas": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/jsesc": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.0.2.tgz",
+ "integrity": "sha512-xKqzzWXDttJuOcawBt4KnKHHIf5oQ/Cxax+0PWFG+DFDgHNAdi+TXECADI+RYiFUMmx8792xsMbbgXj4CwnP4g==",
+ "dev": true,
+ "bin": {
+ "jsesc": "bin/jsesc"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/json-buffer": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz",
+ "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==",
+ "dev": true
+ },
+ "node_modules/json-schema-traverse": {
+ "version": "0.4.1",
+ "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz",
+ "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==",
+ "dev": true
+ },
+ "node_modules/json-stable-stringify-without-jsonify": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz",
+ "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==",
+ "dev": true
+ },
+ "node_modules/json5": {
+ "version": "2.2.3",
+ "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz",
+ "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==",
+ "dev": true,
+ "bin": {
+ "json5": "lib/cli.js"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/jsx-ast-utils": {
+ "version": "3.3.5",
+ "resolved": "https://registry.npmjs.org/jsx-ast-utils/-/jsx-ast-utils-3.3.5.tgz",
+ "integrity": "sha512-ZZow9HBI5O6EPgSJLUb8n2NKgmVWTwCvHGwFuJlMjvLFqlGG6pjirPhtdsseaLZjSibD8eegzmYpUZwoIlj2cQ==",
+ "dev": true,
+ "dependencies": {
+ "array-includes": "^3.1.6",
+ "array.prototype.flat": "^1.3.1",
+ "object.assign": "^4.1.4",
+ "object.values": "^1.1.6"
+ },
+ "engines": {
+ "node": ">=4.0"
+ }
+ },
+ "node_modules/keyv": {
+ "version": "4.5.4",
+ "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz",
+ "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==",
+ "dev": true,
+ "dependencies": {
+ "json-buffer": "3.0.1"
+ }
+ },
+ "node_modules/levn": {
+ "version": "0.4.1",
+ "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz",
+ "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==",
+ "dev": true,
+ "dependencies": {
+ "prelude-ls": "^1.2.1",
+ "type-check": "~0.4.0"
+ },
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/lilconfig": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-2.1.0.tgz",
+ "integrity": "sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ==",
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/lines-and-columns": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz",
+ "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg=="
+ },
+ "node_modules/locate-path": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz",
+ "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==",
+ "dev": true,
+ "dependencies": {
+ "p-locate": "^5.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/lodash": {
+ "version": "4.17.21",
+ "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz",
+ "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg=="
+ },
+ "node_modules/lodash.merge": {
+ "version": "4.6.2",
+ "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz",
+ "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==",
+ "dev": true
+ },
+ "node_modules/loose-envify": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz",
+ "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==",
+ "dependencies": {
+ "js-tokens": "^3.0.0 || ^4.0.0"
+ },
+ "bin": {
+ "loose-envify": "cli.js"
+ }
+ },
+ "node_modules/loupe": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/loupe/-/loupe-3.1.2.tgz",
+ "integrity": "sha512-23I4pFZHmAemUnz8WZXbYRSKYj801VDaNv9ETuMh7IrMc7VuVVSo+Z9iLE3ni30+U48iDWfi30d3twAXBYmnCg==",
+ "dev": true
+ },
+ "node_modules/lru-cache": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz",
+ "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==",
+ "dev": true,
+ "dependencies": {
+ "yallist": "^3.0.2"
+ }
+ },
+ "node_modules/lucide-react": {
+ "version": "0.447.0",
+ "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.447.0.tgz",
+ "integrity": "sha512-SZ//hQmvi+kDKrNepArVkYK7/jfeZ5uFNEnYmd45RKZcbGD78KLnrcNXmgeg6m+xNHFvTG+CblszXCy4n6DN4w==",
+ "peerDependencies": {
+ "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0-rc"
+ }
+ },
+ "node_modules/lz-string": {
+ "version": "1.5.0",
+ "resolved": "https://registry.npmjs.org/lz-string/-/lz-string-1.5.0.tgz",
+ "integrity": "sha512-h5bgJWpxJNswbU7qCrV0tIKQCaS3blPDrqKWx+QxzuzL1zGUzij9XCWLrSLsJPu5t+eWA/ycetzYAO5IOMcWAQ==",
+ "dev": true,
+ "bin": {
+ "lz-string": "bin/bin.js"
+ }
+ },
+ "node_modules/magic-string": {
+ "version": "0.30.12",
+ "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.12.tgz",
+ "integrity": "sha512-Ea8I3sQMVXr8JhN4z+H/d8zwo+tYDgHE9+5G4Wnrwhs0gaK9fXTKx0Tw5Xwsd/bCPTTZNRAdpyzvoeORe9LYpw==",
+ "dev": true,
+ "dependencies": {
+ "@jridgewell/sourcemap-codec": "^1.5.0"
+ }
+ },
+ "node_modules/map-or-similar": {
+ "version": "1.5.0",
+ "resolved": "https://registry.npmjs.org/map-or-similar/-/map-or-similar-1.5.0.tgz",
+ "integrity": "sha512-0aF7ZmVon1igznGI4VS30yugpduQW3y3GkcgGJOp7d8x8QrizhigUxjI/m2UojsXXto+jLAH3KSz+xOJTiORjg==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/memoizerific": {
+ "version": "1.11.3",
+ "resolved": "https://registry.npmjs.org/memoizerific/-/memoizerific-1.11.3.tgz",
+ "integrity": "sha512-/EuHYwAPdLtXwAwSZkh/Gutery6pD2KYd44oQLhAvQp/50mpyduZh8Q7PYHXTCJ+wuXxt7oij2LXyIJOOYFPog==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "map-or-similar": "^1.5.0"
+ }
+ },
+ "node_modules/merge2": {
+ "version": "1.4.1",
+ "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz",
+ "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==",
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/micromatch": {
+ "version": "4.0.8",
+ "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz",
+ "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==",
+ "dependencies": {
+ "braces": "^3.0.3",
+ "picomatch": "^2.3.1"
+ },
+ "engines": {
+ "node": ">=8.6"
+ }
+ },
+ "node_modules/mime-db": {
+ "version": "1.52.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
+ "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/mime-types": {
+ "version": "2.1.35",
+ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
+ "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
+ "dev": true,
+ "dependencies": {
+ "mime-db": "1.52.0"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/min-indent": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/min-indent/-/min-indent-1.0.1.tgz",
+ "integrity": "sha512-I9jwMn07Sy/IwOj3zVkVik2JTvgpaykDZEigL6Rx6N9LbMywwUSMtxET+7lVoDLLd3O3IXwJwvuuns8UB/HeAg==",
+ "dev": true,
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/minimatch": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
+ "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==",
+ "dev": true,
+ "dependencies": {
+ "brace-expansion": "^1.1.7"
+ },
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/minimist": {
+ "version": "1.2.8",
+ "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz",
+ "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==",
+ "dev": true,
+ "license": "MIT",
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/minipass": {
+ "version": "7.1.2",
+ "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz",
+ "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==",
+ "engines": {
+ "node": ">=16 || 14 >=14.17"
+ }
+ },
+ "node_modules/ms": {
+ "version": "2.1.3",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
+ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
+ "dev": true
+ },
+ "node_modules/msw": {
+ "version": "2.6.0",
+ "resolved": "https://registry.npmjs.org/msw/-/msw-2.6.0.tgz",
+ "integrity": "sha512-n3tx2w0MZ3H4pxY0ozrQ4sNPzK/dGtlr2cIIyuEsgq2Bhy4wvcW6ZH2w/gXM9+MEUY6HC1fWhqtcXDxVZr5Jxw==",
+ "dev": true,
+ "hasInstallScript": true,
+ "dependencies": {
+ "@bundled-es-modules/cookie": "^2.0.0",
+ "@bundled-es-modules/statuses": "^1.0.1",
+ "@bundled-es-modules/tough-cookie": "^0.1.6",
+ "@inquirer/confirm": "^5.0.0",
+ "@mswjs/interceptors": "^0.36.5",
+ "@open-draft/deferred-promise": "^2.2.0",
+ "@open-draft/until": "^2.1.0",
+ "@types/cookie": "^0.6.0",
+ "@types/statuses": "^2.0.4",
+ "chalk": "^4.1.2",
+ "graphql": "^16.8.1",
+ "headers-polyfill": "^4.0.2",
+ "is-node-process": "^1.2.0",
+ "outvariant": "^1.4.3",
+ "path-to-regexp": "^6.3.0",
+ "strict-event-emitter": "^0.5.1",
+ "type-fest": "^4.26.1",
+ "yargs": "^17.7.2"
+ },
+ "bin": {
+ "msw": "cli/index.js"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/mswjs"
+ },
+ "peerDependencies": {
+ "typescript": ">= 4.8.x"
+ },
+ "peerDependenciesMeta": {
+ "typescript": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/mute-stream": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/mute-stream/-/mute-stream-2.0.0.tgz",
+ "integrity": "sha512-WWdIxpyjEn+FhQJQQv9aQAYlHoNVdzIzUySNV1gHUPDSdZJ3yZn7pAAbQcV7B56Mvu881q9FZV+0Vx2xC44VWA==",
+ "dev": true,
+ "engines": {
+ "node": "^18.17.0 || >=20.5.0"
+ }
+ },
+ "node_modules/mz": {
+ "version": "2.7.0",
+ "resolved": "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz",
+ "integrity": "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==",
+ "dependencies": {
+ "any-promise": "^1.0.0",
+ "object-assign": "^4.0.1",
+ "thenify-all": "^1.0.0"
+ }
+ },
+ "node_modules/nanoid": {
+ "version": "3.3.7",
+ "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz",
+ "integrity": "sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "bin": {
+ "nanoid": "bin/nanoid.cjs"
+ },
+ "engines": {
+ "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1"
+ }
+ },
+ "node_modules/natural-compare": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz",
+ "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==",
+ "dev": true
+ },
+ "node_modules/node-releases": {
+ "version": "2.0.18",
+ "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.18.tgz",
+ "integrity": "sha512-d9VeXT4SJ7ZeOqGX6R5EM022wpL+eWPooLI+5UpWn2jCT1aosUQEhQP214x33Wkwx3JQMvIm+tIoVOdodFS40g==",
+ "dev": true
+ },
+ "node_modules/normalize-path": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz",
+ "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/normalize-range": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz",
+ "integrity": "sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/nwsapi": {
+ "version": "2.2.13",
+ "resolved": "https://registry.npmjs.org/nwsapi/-/nwsapi-2.2.13.tgz",
+ "integrity": "sha512-cTGB9ptp9dY9A5VbMSe7fQBcl/tt22Vcqdq8+eN93rblOuE0aCFu4aZ2vMwct/2t+lFnosm8RkQW1I0Omb1UtQ==",
+ "dev": true
+ },
+ "node_modules/object-assign": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
+ "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/object-hash": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/object-hash/-/object-hash-3.0.0.tgz",
+ "integrity": "sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==",
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/object-inspect": {
+ "version": "1.13.2",
+ "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.2.tgz",
+ "integrity": "sha512-IRZSRuzJiynemAXPYtPe5BoI/RESNYR7TYm50MC5Mqbd3Jmw5y790sErYw3V6SryFJD64b74qQQs9wn5Bg/k3g==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/object-keys": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz",
+ "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/object.assign": {
+ "version": "4.1.5",
+ "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.5.tgz",
+ "integrity": "sha512-byy+U7gp+FVwmyzKPYhW2h5l3crpmGsxl7X2s8y43IgxvG4g3QZ6CffDtsNQy1WsmZpQbO+ybo0AlW7TY6DcBQ==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.5",
+ "define-properties": "^1.2.1",
+ "has-symbols": "^1.0.3",
+ "object-keys": "^1.1.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/object.entries": {
+ "version": "1.1.8",
+ "resolved": "https://registry.npmjs.org/object.entries/-/object.entries-1.1.8.tgz",
+ "integrity": "sha512-cmopxi8VwRIAw/fkijJohSfpef5PdN0pMQJN6VC/ZKvn0LIknWD8KtgY6KlQdEc4tIjcQ3HxSMmnvtzIscdaYQ==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.7",
+ "define-properties": "^1.2.1",
+ "es-object-atoms": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/object.fromentries": {
+ "version": "2.0.8",
+ "resolved": "https://registry.npmjs.org/object.fromentries/-/object.fromentries-2.0.8.tgz",
+ "integrity": "sha512-k6E21FzySsSK5a21KRADBd/NGneRegFO5pLHfdQLpRDETUNJueLXs3WCzyQ3tFRDYgbq3KHGXfTbi2bs8WQ6rQ==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.7",
+ "define-properties": "^1.2.1",
+ "es-abstract": "^1.23.2",
+ "es-object-atoms": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/object.values": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.2.0.tgz",
+ "integrity": "sha512-yBYjY9QX2hnRmZHAjG/f13MzmBzxzYgQhFrke06TTyKY5zSTEqkOeukBzIdVA3j3ulu8Qa3MbVFShV7T2RmGtQ==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.7",
+ "define-properties": "^1.2.1",
+ "es-object-atoms": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/once": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
+ "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "wrappy": "1"
+ }
+ },
+ "node_modules/open": {
+ "version": "8.4.2",
+ "resolved": "https://registry.npmjs.org/open/-/open-8.4.2.tgz",
+ "integrity": "sha512-7x81NCL719oNbsq/3mh+hVrAWmFuEYUqrq/Iw3kUzH8ReypT9QQ0BLoJS7/G9k6N81XjW4qHWtjWwe/9eLy1EQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "define-lazy-prop": "^2.0.0",
+ "is-docker": "^2.1.1",
+ "is-wsl": "^2.2.0"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/openapi-fetch": {
+ "version": "0.12.5",
+ "resolved": "https://registry.npmjs.org/openapi-fetch/-/openapi-fetch-0.12.5.tgz",
+ "integrity": "sha512-FnAMWLt0MNL6ComcL4q/YbB1tUgyz5YnYtwA1+zlJ5xcucmK5RlWsgH1ynxmEeu8fGJkYjm8armU/HVpORc9lw==",
+ "dependencies": {
+ "openapi-typescript-helpers": "^0.0.15"
+ }
+ },
+ "node_modules/openapi-typescript-helpers": {
+ "version": "0.0.15",
+ "resolved": "https://registry.npmjs.org/openapi-typescript-helpers/-/openapi-typescript-helpers-0.0.15.tgz",
+ "integrity": "sha512-opyTPaunsklCBpTK8JGef6mfPhLSnyy5a0IN9vKtx3+4aExf+KxEqYwIy3hqkedXIB97u357uLMJsOnm3GVjsw=="
+ },
+ "node_modules/optionator": {
+ "version": "0.9.4",
+ "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz",
+ "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==",
+ "dev": true,
+ "dependencies": {
+ "deep-is": "^0.1.3",
+ "fast-levenshtein": "^2.0.6",
+ "levn": "^0.4.1",
+ "prelude-ls": "^1.2.1",
+ "type-check": "^0.4.0",
+ "word-wrap": "^1.2.5"
+ },
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/outvariant": {
+ "version": "1.4.3",
+ "resolved": "https://registry.npmjs.org/outvariant/-/outvariant-1.4.3.tgz",
+ "integrity": "sha512-+Sl2UErvtsoajRDKCE5/dBz4DIvHXQQnAxtQTF04OJxY0+DyZXSo5P5Bb7XYWOh81syohlYL24hbDwxedPUJCA==",
+ "dev": true
+ },
+ "node_modules/p-limit": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz",
+ "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==",
+ "dev": true,
+ "dependencies": {
+ "yocto-queue": "^0.1.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/p-locate": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz",
+ "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==",
+ "dev": true,
+ "dependencies": {
+ "p-limit": "^3.0.2"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/package-json-from-dist": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz",
+ "integrity": "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw=="
+ },
+ "node_modules/parent-module": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz",
+ "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==",
+ "dev": true,
+ "dependencies": {
+ "callsites": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/parse5": {
+ "version": "7.2.1",
+ "resolved": "https://registry.npmjs.org/parse5/-/parse5-7.2.1.tgz",
+ "integrity": "sha512-BuBYQYlv1ckiPdQi/ohiivi9Sagc9JG+Ozs0r7b/0iK3sKmrb0b9FdWdBbOdx6hBCM/F9Ir82ofnBhtZOjCRPQ==",
+ "dev": true,
+ "dependencies": {
+ "entities": "^4.5.0"
+ },
+ "funding": {
+ "url": "https://github.com/inikulin/parse5?sponsor=1"
+ }
+ },
+ "node_modules/path-exists": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz",
+ "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/path-is-absolute": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
+ "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/path-key": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz",
+ "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/path-parse": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz",
+ "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw=="
+ },
+ "node_modules/path-scurry": {
+ "version": "1.11.1",
+ "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz",
+ "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==",
+ "dependencies": {
+ "lru-cache": "^10.2.0",
+ "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0"
+ },
+ "engines": {
+ "node": ">=16 || 14 >=14.18"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
+ }
+ },
+ "node_modules/path-scurry/node_modules/lru-cache": {
+ "version": "10.4.3",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz",
+ "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ=="
+ },
+ "node_modules/path-to-regexp": {
+ "version": "6.3.0",
+ "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-6.3.0.tgz",
+ "integrity": "sha512-Yhpw4T9C6hPpgPeA28us07OJeqZ5EzQTkbfwuhsUg0c237RomFoETJgmp2sa3F/41gfLE6G5cqcYwznmeEeOlQ==",
+ "dev": true
+ },
+ "node_modules/path-type": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz",
+ "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/pathe": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/pathe/-/pathe-1.1.2.tgz",
+ "integrity": "sha512-whLdWMYL2TwI08hn8/ZqAbrVemu0LNaNNJZX73O6qaIdCTfXutsLhMkjdENX0qhsQ9uIimo4/aQOmXkoon2nDQ==",
+ "dev": true
+ },
+ "node_modules/pathval": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/pathval/-/pathval-2.0.0.tgz",
+ "integrity": "sha512-vE7JKRyES09KiunauX7nd2Q9/L7lhok4smP9RZTDeD4MVs72Dp2qNFVz39Nz5a0FVEW0BJR6C0DYrq6unoziZA==",
+ "dev": true,
+ "engines": {
+ "node": ">= 14.16"
+ }
+ },
+ "node_modules/picocolors": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz",
+ "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA=="
+ },
+ "node_modules/picomatch": {
+ "version": "2.3.1",
+ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
+ "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
+ "engines": {
+ "node": ">=8.6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/jonschlinkert"
+ }
+ },
+ "node_modules/pify": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz",
+ "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/pirates": {
+ "version": "4.0.6",
+ "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.6.tgz",
+ "integrity": "sha512-saLsH7WeYYPiD25LDuLRRY/i+6HaPYr6G1OUlN39otzkSTxKnubR9RTxS3/Kk50s1g2JTgFwWQDQyplC5/SHZg==",
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/polished": {
+ "version": "4.3.1",
+ "resolved": "https://registry.npmjs.org/polished/-/polished-4.3.1.tgz",
+ "integrity": "sha512-OBatVyC/N7SCW/FaDHrSd+vn0o5cS855TOmYi4OkdWUMSJCET/xip//ch8xGUvtr3i44X9LVyWwQlRMTN3pwSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/runtime": "^7.17.8"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/possible-typed-array-names": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/possible-typed-array-names/-/possible-typed-array-names-1.0.0.tgz",
+ "integrity": "sha512-d7Uw+eZoloe0EHDIYoe+bQ5WXnGMOpmiZFTuMWCwpjzzkL2nTjcKiAk4hh8TjnGye2TwWOk3UXucZ+3rbmBa8Q==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/postcss": {
+ "version": "8.4.47",
+ "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.47.tgz",
+ "integrity": "sha512-56rxCq7G/XfB4EkXq9Egn5GCqugWvDFjafDOThIdMBsI15iqPqR5r15TfSr1YPYeEI19YeaXMCbY6u88Y76GLQ==",
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/postcss/"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/postcss"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "dependencies": {
+ "nanoid": "^3.3.7",
+ "picocolors": "^1.1.0",
+ "source-map-js": "^1.2.1"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14"
+ }
+ },
+ "node_modules/postcss-import": {
+ "version": "15.1.0",
+ "resolved": "https://registry.npmjs.org/postcss-import/-/postcss-import-15.1.0.tgz",
+ "integrity": "sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew==",
+ "dependencies": {
+ "postcss-value-parser": "^4.0.0",
+ "read-cache": "^1.0.0",
+ "resolve": "^1.1.7"
+ },
+ "engines": {
+ "node": ">=14.0.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.0.0"
+ }
+ },
+ "node_modules/postcss-import/node_modules/resolve": {
+ "version": "1.22.8",
+ "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.8.tgz",
+ "integrity": "sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==",
+ "dependencies": {
+ "is-core-module": "^2.13.0",
+ "path-parse": "^1.0.7",
+ "supports-preserve-symlinks-flag": "^1.0.0"
+ },
+ "bin": {
+ "resolve": "bin/resolve"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/postcss-js": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/postcss-js/-/postcss-js-4.0.1.tgz",
+ "integrity": "sha512-dDLF8pEO191hJMtlHFPRa8xsizHaM82MLfNkUHdUtVEV3tgTp5oj+8qbEqYM57SLfc74KSbw//4SeJma2LRVIw==",
+ "dependencies": {
+ "camelcase-css": "^2.0.1"
+ },
+ "engines": {
+ "node": "^12 || ^14 || >= 16"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/postcss/"
+ },
+ "peerDependencies": {
+ "postcss": "^8.4.21"
+ }
+ },
+ "node_modules/postcss-load-config": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-4.0.2.tgz",
+ "integrity": "sha512-bSVhyJGL00wMVoPUzAVAnbEoWyqRxkjv64tUl427SKnPrENtq6hJwUojroMz2VB+Q1edmi4IfrAPpami5VVgMQ==",
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/postcss/"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "dependencies": {
+ "lilconfig": "^3.0.0",
+ "yaml": "^2.3.4"
+ },
+ "engines": {
+ "node": ">= 14"
+ },
+ "peerDependencies": {
+ "postcss": ">=8.0.9",
+ "ts-node": ">=9.0.0"
+ },
+ "peerDependenciesMeta": {
+ "postcss": {
+ "optional": true
+ },
+ "ts-node": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/postcss-load-config/node_modules/lilconfig": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.2.tgz",
+ "integrity": "sha512-eop+wDAvpItUys0FWkHIKeC9ybYrTGbU41U5K7+bttZZeohvnY7M9dZ5kB21GNWiFT2q1OoPTvncPCgSOVO5ow==",
+ "engines": {
+ "node": ">=14"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/antonk52"
+ }
+ },
+ "node_modules/postcss-nested": {
+ "version": "6.2.0",
+ "resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-6.2.0.tgz",
+ "integrity": "sha512-HQbt28KulC5AJzG+cZtj9kvKB93CFCdLvog1WFLf1D+xmMvPGlBstkpTEZfK5+AN9hfJocyBFCNiqyS48bpgzQ==",
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/postcss/"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "dependencies": {
+ "postcss-selector-parser": "^6.1.1"
+ },
+ "engines": {
+ "node": ">=12.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.2.14"
+ }
+ },
+ "node_modules/postcss-selector-parser": {
+ "version": "6.1.2",
+ "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.1.2.tgz",
+ "integrity": "sha512-Q8qQfPiZ+THO/3ZrOrO0cJJKfpYCagtMUkXbnEfmgUjwXg6z/WBeOyS9APBBPCTSiDV+s4SwQGu8yFsiMRIudg==",
+ "dependencies": {
+ "cssesc": "^3.0.0",
+ "util-deprecate": "^1.0.2"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/postcss-value-parser": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz",
+ "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ=="
+ },
+ "node_modules/prelude-ls": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz",
+ "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/prettier": {
+ "version": "3.3.3",
+ "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.3.3.tgz",
+ "integrity": "sha512-i2tDNA0O5IrMO757lfrdQZCc2jPNDVntV0m/+4whiDfWaTKfMNgR7Qz0NAeGz/nRqF4m5/6CLzbP4/liHt12Ew==",
+ "devOptional": true,
+ "bin": {
+ "prettier": "bin/prettier.cjs"
+ },
+ "engines": {
+ "node": ">=14"
+ },
+ "funding": {
+ "url": "https://github.com/prettier/prettier?sponsor=1"
+ }
+ },
+ "node_modules/pretty-format": {
+ "version": "27.5.1",
+ "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-27.5.1.tgz",
+ "integrity": "sha512-Qb1gy5OrP5+zDf2Bvnzdl3jsTf1qXVMazbvCoKhtKqVs4/YK4ozX4gKQJJVyNe+cajNPn0KoC0MC3FUmaHWEmQ==",
+ "dev": true,
+ "dependencies": {
+ "ansi-regex": "^5.0.1",
+ "ansi-styles": "^5.0.0",
+ "react-is": "^17.0.1"
+ },
+ "engines": {
+ "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0"
+ }
+ },
+ "node_modules/pretty-format/node_modules/ansi-regex": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
+ "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/pretty-format/node_modules/ansi-styles": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz",
+ "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==",
+ "dev": true,
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/ansi-styles?sponsor=1"
+ }
+ },
+ "node_modules/pretty-format/node_modules/react-is": {
+ "version": "17.0.2",
+ "resolved": "https://registry.npmjs.org/react-is/-/react-is-17.0.2.tgz",
+ "integrity": "sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==",
+ "dev": true
+ },
+ "node_modules/process": {
+ "version": "0.11.10",
+ "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz",
+ "integrity": "sha512-cdGef/drWFoydD1JsMzuFf8100nZl+GT+yacc2bEced5f9Rjk4z+WtFUTBu9PhOi9j/jfmBPu0mMEY4wIdAF8A==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6.0"
+ }
+ },
+ "node_modules/prop-types": {
+ "version": "15.8.1",
+ "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz",
+ "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==",
+ "dependencies": {
+ "loose-envify": "^1.4.0",
+ "object-assign": "^4.1.1",
+ "react-is": "^16.13.1"
+ }
+ },
+ "node_modules/psl": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/psl/-/psl-1.9.0.tgz",
+ "integrity": "sha512-E/ZsdU4HLs/68gYzgGTkMicWTLPdAftJLfJFlLUAAKZGkStNU72sZjT66SnMDVOfOWY/YAoiD7Jxa9iHvngcag==",
+ "dev": true
+ },
+ "node_modules/punycode": {
+ "version": "2.3.1",
+ "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz",
+ "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==",
+ "dev": true,
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/querystringify": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/querystringify/-/querystringify-2.2.0.tgz",
+ "integrity": "sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==",
+ "dev": true
+ },
+ "node_modules/queue-microtask": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz",
+ "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/feross"
+ },
+ {
+ "type": "patreon",
+ "url": "https://www.patreon.com/feross"
+ },
+ {
+ "type": "consulting",
+ "url": "https://feross.org/support"
+ }
+ ]
+ },
+ "node_modules/react": {
+ "version": "18.3.1",
+ "resolved": "https://registry.npmjs.org/react/-/react-18.3.1.tgz",
+ "integrity": "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==",
+ "dependencies": {
+ "loose-envify": "^1.1.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/react-day-picker": {
+ "version": "8.10.1",
+ "resolved": "https://registry.npmjs.org/react-day-picker/-/react-day-picker-8.10.1.tgz",
+ "integrity": "sha512-TMx7fNbhLk15eqcMt+7Z7S2KF7mfTId/XJDjKE8f+IUcFn0l08/kI4FiYTL/0yuOLmEcbR4Fwe3GJf/NiiMnPA==",
+ "funding": {
+ "type": "individual",
+ "url": "https://github.com/sponsors/gpbl"
+ },
+ "peerDependencies": {
+ "date-fns": "^2.28.0 || ^3.0.0",
+ "react": "^16.8.0 || ^17.0.0 || ^18.0.0"
+ }
+ },
+ "node_modules/react-docgen": {
+ "version": "7.1.0",
+ "resolved": "https://registry.npmjs.org/react-docgen/-/react-docgen-7.1.0.tgz",
+ "integrity": "sha512-APPU8HB2uZnpl6Vt/+0AFoVYgSRtfiP6FLrZgPPTDmqSb2R4qZRbgd0A3VzIFxDt5e+Fozjx79WjLWnF69DK8g==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/core": "^7.18.9",
+ "@babel/traverse": "^7.18.9",
+ "@babel/types": "^7.18.9",
+ "@types/babel__core": "^7.18.0",
+ "@types/babel__traverse": "^7.18.0",
+ "@types/doctrine": "^0.0.9",
+ "@types/resolve": "^1.20.2",
+ "doctrine": "^3.0.0",
+ "resolve": "^1.22.1",
+ "strip-indent": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=16.14.0"
+ }
+ },
+ "node_modules/react-docgen-typescript": {
+ "version": "2.2.2",
+ "resolved": "https://registry.npmjs.org/react-docgen-typescript/-/react-docgen-typescript-2.2.2.tgz",
+ "integrity": "sha512-tvg2ZtOpOi6QDwsb3GZhOjDkkX0h8Z2gipvTg6OVMUyoYoURhEiRNePT8NZItTVCDh39JJHnLdfCOkzoLbFnTg==",
+ "dev": true,
+ "license": "MIT",
+ "peerDependencies": {
+ "typescript": ">= 4.3.x"
+ }
+ },
+ "node_modules/react-docgen/node_modules/doctrine": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz",
+ "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "dependencies": {
+ "esutils": "^2.0.2"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/react-docgen/node_modules/resolve": {
+ "version": "1.22.8",
+ "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.8.tgz",
+ "integrity": "sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "is-core-module": "^2.13.0",
+ "path-parse": "^1.0.7",
+ "supports-preserve-symlinks-flag": "^1.0.0"
+ },
+ "bin": {
+ "resolve": "bin/resolve"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/react-docgen/node_modules/strip-indent": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/strip-indent/-/strip-indent-4.0.0.tgz",
+ "integrity": "sha512-mnVSV2l+Zv6BLpSD/8V87CW/y9EmmbYzGCIavsnsI6/nwn26DwffM/yztm30Z/I2DY9wdS3vXVCMnHDgZaVNoA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "min-indent": "^1.0.1"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/react-dom": {
+ "version": "18.3.1",
+ "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.3.1.tgz",
+ "integrity": "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==",
+ "dependencies": {
+ "loose-envify": "^1.1.0",
+ "scheduler": "^0.23.2"
+ },
+ "peerDependencies": {
+ "react": "^18.3.1"
+ }
+ },
+ "node_modules/react-hook-form": {
+ "version": "7.53.1",
+ "resolved": "https://registry.npmjs.org/react-hook-form/-/react-hook-form-7.53.1.tgz",
+ "integrity": "sha512-6aiQeBda4zjcuaugWvim9WsGqisoUk+etmFEsSUMm451/Ic8L/UAb7sRtMj3V+Hdzm6mMjU1VhiSzYUZeBm0Vg==",
+ "engines": {
+ "node": ">=18.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/react-hook-form"
+ },
+ "peerDependencies": {
+ "react": "^16.8.0 || ^17 || ^18 || ^19"
+ }
+ },
+ "node_modules/react-is": {
+ "version": "16.13.1",
+ "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz",
+ "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ=="
+ },
+ "node_modules/react-remove-scroll": {
+ "version": "2.6.0",
+ "resolved": "https://registry.npmjs.org/react-remove-scroll/-/react-remove-scroll-2.6.0.tgz",
+ "integrity": "sha512-I2U4JVEsQenxDAKaVa3VZ/JeJZe0/2DxPWL8Tj8yLKctQJQiZM52pn/GWFpSp8dftjM3pSAHVJZscAnC/y+ySQ==",
+ "dependencies": {
+ "react-remove-scroll-bar": "^2.3.6",
+ "react-style-singleton": "^2.2.1",
+ "tslib": "^2.1.0",
+ "use-callback-ref": "^1.3.0",
+ "use-sidecar": "^1.1.2"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "peerDependencies": {
+ "@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0",
+ "react": "^16.8.0 || ^17.0.0 || ^18.0.0"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/react-remove-scroll-bar": {
+ "version": "2.3.6",
+ "resolved": "https://registry.npmjs.org/react-remove-scroll-bar/-/react-remove-scroll-bar-2.3.6.tgz",
+ "integrity": "sha512-DtSYaao4mBmX+HDo5YWYdBWQwYIQQshUV/dVxFxK+KM26Wjwp1gZ6rv6OC3oujI6Bfu6Xyg3TwK533AQutsn/g==",
+ "dependencies": {
+ "react-style-singleton": "^2.2.1",
+ "tslib": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "peerDependencies": {
+ "@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0",
+ "react": "^16.8.0 || ^17.0.0 || ^18.0.0"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/react-smooth": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/react-smooth/-/react-smooth-4.0.1.tgz",
+ "integrity": "sha512-OE4hm7XqR0jNOq3Qmk9mFLyd6p2+j6bvbPJ7qlB7+oo0eNcL2l7WQzG6MBnT3EXY6xzkLMUBec3AfewJdA0J8w==",
+ "dependencies": {
+ "fast-equals": "^5.0.1",
+ "prop-types": "^15.8.1",
+ "react-transition-group": "^4.4.5"
+ },
+ "peerDependencies": {
+ "react": "^16.8.0 || ^17.0.0 || ^18.0.0",
+ "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0"
+ }
+ },
+ "node_modules/react-style-singleton": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/react-style-singleton/-/react-style-singleton-2.2.1.tgz",
+ "integrity": "sha512-ZWj0fHEMyWkHzKYUr2Bs/4zU6XLmq9HsgBURm7g5pAVfyn49DgUiNgY2d4lXRlYSiCif9YBGpQleewkcqddc7g==",
+ "dependencies": {
+ "get-nonce": "^1.0.0",
+ "invariant": "^2.2.4",
+ "tslib": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "peerDependencies": {
+ "@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0",
+ "react": "^16.8.0 || ^17.0.0 || ^18.0.0"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/react-transition-group": {
+ "version": "4.4.5",
+ "resolved": "https://registry.npmjs.org/react-transition-group/-/react-transition-group-4.4.5.tgz",
+ "integrity": "sha512-pZcd1MCJoiKiBR2NRxeCRg13uCXbydPnmB4EOeRrY7480qNWO8IIgQG6zlDkm6uRMsURXPuKq0GWtiM59a5Q6g==",
+ "dependencies": {
+ "@babel/runtime": "^7.5.5",
+ "dom-helpers": "^5.0.1",
+ "loose-envify": "^1.4.0",
+ "prop-types": "^15.6.2"
+ },
+ "peerDependencies": {
+ "react": ">=16.6.0",
+ "react-dom": ">=16.6.0"
+ }
+ },
+ "node_modules/read-cache": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/read-cache/-/read-cache-1.0.0.tgz",
+ "integrity": "sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==",
+ "dependencies": {
+ "pify": "^2.3.0"
+ }
+ },
+ "node_modules/readdirp": {
+ "version": "3.6.0",
+ "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz",
+ "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==",
+ "dependencies": {
+ "picomatch": "^2.2.1"
+ },
+ "engines": {
+ "node": ">=8.10.0"
+ }
+ },
+ "node_modules/recast": {
+ "version": "0.23.9",
+ "resolved": "https://registry.npmjs.org/recast/-/recast-0.23.9.tgz",
+ "integrity": "sha512-Hx/BGIbwj+Des3+xy5uAtAbdCyqK9y9wbBcDFDYanLS9JnMqf7OeF87HQwUimE87OEc72mr6tkKUKMBBL+hF9Q==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "ast-types": "^0.16.1",
+ "esprima": "~4.0.0",
+ "source-map": "~0.6.1",
+ "tiny-invariant": "^1.3.3",
+ "tslib": "^2.0.1"
+ },
+ "engines": {
+ "node": ">= 4"
+ }
+ },
+ "node_modules/recharts": {
+ "version": "2.13.2",
+ "resolved": "https://registry.npmjs.org/recharts/-/recharts-2.13.2.tgz",
+ "integrity": "sha512-UDLGFmnsBluDIPpQb9uty0ejb+jiVI71vkki8vVsR6ZCJdgjBfKQoQfft4re99CKlTy9qjQApxCLG6TrxJkeAg==",
+ "dependencies": {
+ "clsx": "^2.0.0",
+ "eventemitter3": "^4.0.1",
+ "lodash": "^4.17.21",
+ "react-is": "^18.3.1",
+ "react-smooth": "^4.0.0",
+ "recharts-scale": "^0.4.4",
+ "tiny-invariant": "^1.3.1",
+ "victory-vendor": "^36.6.8"
+ },
+ "engines": {
+ "node": ">=14"
+ },
+ "peerDependencies": {
+ "react": "^16.0.0 || ^17.0.0 || ^18.0.0",
+ "react-dom": "^16.0.0 || ^17.0.0 || ^18.0.0"
+ }
+ },
+ "node_modules/recharts-scale": {
+ "version": "0.4.5",
+ "resolved": "https://registry.npmjs.org/recharts-scale/-/recharts-scale-0.4.5.tgz",
+ "integrity": "sha512-kivNFO+0OcUNu7jQquLXAxz1FIwZj8nrj+YkOKc5694NbjCvcT6aSZiIzNzd2Kul4o4rTto8QVR9lMNtxD4G1w==",
+ "dependencies": {
+ "decimal.js-light": "^2.4.1"
+ }
+ },
+ "node_modules/recharts/node_modules/react-is": {
+ "version": "18.3.1",
+ "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz",
+ "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg=="
+ },
+ "node_modules/redent": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/redent/-/redent-3.0.0.tgz",
+ "integrity": "sha512-6tDA8g98We0zd0GvVeMT9arEOnTw9qM03L9cJXaCjrip1OO764RDBLBfrB4cwzNGDj5OA5ioymC9GkizgWJDUg==",
+ "dev": true,
+ "dependencies": {
+ "indent-string": "^4.0.0",
+ "strip-indent": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/reflect.getprototypeof": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/reflect.getprototypeof/-/reflect.getprototypeof-1.0.6.tgz",
+ "integrity": "sha512-fmfw4XgoDke3kdI6h4xcUz1dG8uaiv5q9gcEwLS4Pnth2kxT+GZ7YehS1JTMGBQmtV7Y4GFGbs2re2NqhdozUg==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.7",
+ "define-properties": "^1.2.1",
+ "es-abstract": "^1.23.1",
+ "es-errors": "^1.3.0",
+ "get-intrinsic": "^1.2.4",
+ "globalthis": "^1.0.3",
+ "which-builtin-type": "^1.1.3"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/regenerator-runtime": {
+ "version": "0.14.1",
+ "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz",
+ "integrity": "sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw=="
+ },
+ "node_modules/regexp.prototype.flags": {
+ "version": "1.5.3",
+ "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.5.3.tgz",
+ "integrity": "sha512-vqlC04+RQoFalODCbCumG2xIOvapzVMHwsyIGM/SIE8fRhFFsXeH8/QQ+s0T0kDAhKc4k30s73/0ydkHQz6HlQ==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.7",
+ "define-properties": "^1.2.1",
+ "es-errors": "^1.3.0",
+ "set-function-name": "^2.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/require-directory": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz",
+ "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/requireindex": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/requireindex/-/requireindex-1.2.0.tgz",
+ "integrity": "sha512-L9jEkOi3ASd9PYit2cwRfyppc9NoABujTP8/5gFcbERmo5jUoAKovIC3fsF17pkTnGsrByysqX+Kxd2OTNI1ww==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.5"
+ }
+ },
+ "node_modules/requires-port": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz",
+ "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==",
+ "dev": true
+ },
+ "node_modules/resolve": {
+ "version": "2.0.0-next.5",
+ "resolved": "https://registry.npmjs.org/resolve/-/resolve-2.0.0-next.5.tgz",
+ "integrity": "sha512-U7WjGVG9sH8tvjW5SmGbQuui75FiyjAX72HX15DwBBwF9dNiQZRQAg9nnPhYy+TUnE0+VcrttuvNI8oSxZcocA==",
+ "dev": true,
+ "dependencies": {
+ "is-core-module": "^2.13.0",
+ "path-parse": "^1.0.7",
+ "supports-preserve-symlinks-flag": "^1.0.0"
+ },
+ "bin": {
+ "resolve": "bin/resolve"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/resolve-from": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz",
+ "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==",
+ "dev": true,
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/resolve-pkg-maps": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/resolve-pkg-maps/-/resolve-pkg-maps-1.0.0.tgz",
+ "integrity": "sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==",
+ "devOptional": true,
+ "funding": {
+ "url": "https://github.com/privatenumber/resolve-pkg-maps?sponsor=1"
+ }
+ },
+ "node_modules/reusify": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz",
+ "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==",
+ "engines": {
+ "iojs": ">=1.0.0",
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/rollup": {
+ "version": "4.24.3",
+ "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.24.3.tgz",
+ "integrity": "sha512-HBW896xR5HGmoksbi3JBDtmVzWiPAYqp7wip50hjQ67JbDz61nyoMPdqu1DvVW9asYb2M65Z20ZHsyJCMqMyDg==",
+ "dev": true,
+ "dependencies": {
+ "@types/estree": "1.0.6"
+ },
+ "bin": {
+ "rollup": "dist/bin/rollup"
+ },
+ "engines": {
+ "node": ">=18.0.0",
+ "npm": ">=8.0.0"
+ },
+ "optionalDependencies": {
+ "@rollup/rollup-android-arm-eabi": "4.24.3",
+ "@rollup/rollup-android-arm64": "4.24.3",
+ "@rollup/rollup-darwin-arm64": "4.24.3",
+ "@rollup/rollup-darwin-x64": "4.24.3",
+ "@rollup/rollup-freebsd-arm64": "4.24.3",
+ "@rollup/rollup-freebsd-x64": "4.24.3",
+ "@rollup/rollup-linux-arm-gnueabihf": "4.24.3",
+ "@rollup/rollup-linux-arm-musleabihf": "4.24.3",
+ "@rollup/rollup-linux-arm64-gnu": "4.24.3",
+ "@rollup/rollup-linux-arm64-musl": "4.24.3",
+ "@rollup/rollup-linux-powerpc64le-gnu": "4.24.3",
+ "@rollup/rollup-linux-riscv64-gnu": "4.24.3",
+ "@rollup/rollup-linux-s390x-gnu": "4.24.3",
+ "@rollup/rollup-linux-x64-gnu": "4.24.3",
+ "@rollup/rollup-linux-x64-musl": "4.24.3",
+ "@rollup/rollup-win32-arm64-msvc": "4.24.3",
+ "@rollup/rollup-win32-ia32-msvc": "4.24.3",
+ "@rollup/rollup-win32-x64-msvc": "4.24.3",
+ "fsevents": "~2.3.2"
+ }
+ },
+ "node_modules/rrweb-cssom": {
+ "version": "0.7.1",
+ "resolved": "https://registry.npmjs.org/rrweb-cssom/-/rrweb-cssom-0.7.1.tgz",
+ "integrity": "sha512-TrEMa7JGdVm0UThDJSx7ddw5nVm3UJS9o9CCIZ72B1vSyEZoziDqBYP3XIoi/12lKrJR8rE3jeFHMok2F/Mnsg==",
+ "dev": true
+ },
+ "node_modules/run-parallel": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz",
+ "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/feross"
+ },
+ {
+ "type": "patreon",
+ "url": "https://www.patreon.com/feross"
+ },
+ {
+ "type": "consulting",
+ "url": "https://feross.org/support"
+ }
+ ],
+ "dependencies": {
+ "queue-microtask": "^1.2.2"
+ }
+ },
+ "node_modules/safe-array-concat": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/safe-array-concat/-/safe-array-concat-1.1.2.tgz",
+ "integrity": "sha512-vj6RsCsWBCf19jIeHEfkRMw8DPiBb+DMXklQ/1SGDHOMlHdPUkZXFQ2YdplS23zESTijAcurb1aSgJA3AgMu1Q==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.7",
+ "get-intrinsic": "^1.2.4",
+ "has-symbols": "^1.0.3",
+ "isarray": "^2.0.5"
+ },
+ "engines": {
+ "node": ">=0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/safe-regex-test": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/safe-regex-test/-/safe-regex-test-1.0.3.tgz",
+ "integrity": "sha512-CdASjNJPvRa7roO6Ra/gLYBTzYzzPyyBXxIMdGW3USQLyjWEls2RgW5UBTXaQVp+OrpeCK3bLem8smtmheoRuw==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.6",
+ "es-errors": "^1.3.0",
+ "is-regex": "^1.1.4"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/safer-buffer": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
+ "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==",
+ "dev": true
+ },
+ "node_modules/saxes": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/saxes/-/saxes-6.0.0.tgz",
+ "integrity": "sha512-xAg7SOnEhrm5zI3puOOKyy1OMcMlIJZYNJY7xLBwSze0UjhPLnWfj2GF2EpT0jmzaJKIWKHLsaSSajf35bcYnA==",
+ "dev": true,
+ "dependencies": {
+ "xmlchars": "^2.2.0"
+ },
+ "engines": {
+ "node": ">=v12.22.7"
+ }
+ },
+ "node_modules/scheduler": {
+ "version": "0.23.2",
+ "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.2.tgz",
+ "integrity": "sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==",
+ "dependencies": {
+ "loose-envify": "^1.1.0"
+ }
+ },
+ "node_modules/semver": {
+ "version": "6.3.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
+ "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
+ "dev": true,
+ "bin": {
+ "semver": "bin/semver.js"
+ }
+ },
+ "node_modules/set-function-length": {
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz",
+ "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==",
+ "dev": true,
+ "dependencies": {
+ "define-data-property": "^1.1.4",
+ "es-errors": "^1.3.0",
+ "function-bind": "^1.1.2",
+ "get-intrinsic": "^1.2.4",
+ "gopd": "^1.0.1",
+ "has-property-descriptors": "^1.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/set-function-name": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/set-function-name/-/set-function-name-2.0.2.tgz",
+ "integrity": "sha512-7PGFlmtwsEADb0WYyvCMa1t+yke6daIG4Wirafur5kcf+MhUnPms1UeR0CKQdTZD81yESwMHbtn+TR+dMviakQ==",
+ "dev": true,
+ "dependencies": {
+ "define-data-property": "^1.1.4",
+ "es-errors": "^1.3.0",
+ "functions-have-names": "^1.2.3",
+ "has-property-descriptors": "^1.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/shebang-command": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz",
+ "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==",
+ "dependencies": {
+ "shebang-regex": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/shebang-regex": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz",
+ "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/side-channel": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz",
+ "integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.7",
+ "es-errors": "^1.3.0",
+ "get-intrinsic": "^1.2.4",
+ "object-inspect": "^1.13.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/siginfo": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/siginfo/-/siginfo-2.0.0.tgz",
+ "integrity": "sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==",
+ "dev": true
+ },
+ "node_modules/signal-exit": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz",
+ "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==",
+ "engines": {
+ "node": ">=14"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
+ }
+ },
+ "node_modules/slash": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz",
+ "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/source-map": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+ "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==",
+ "dev": true,
+ "license": "BSD-3-Clause",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/source-map-js": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz",
+ "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/stackback": {
+ "version": "0.0.2",
+ "resolved": "https://registry.npmjs.org/stackback/-/stackback-0.0.2.tgz",
+ "integrity": "sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==",
+ "dev": true
+ },
+ "node_modules/statuses": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz",
+ "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/std-env": {
+ "version": "3.7.0",
+ "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.7.0.tgz",
+ "integrity": "sha512-JPbdCEQLj1w5GilpiHAx3qJvFndqybBysA3qUOnznweH4QbNYUsW/ea8QzSrnh0vNsezMMw5bcVool8lM0gwzg==",
+ "dev": true
+ },
+ "node_modules/storybook": {
+ "version": "8.4.2",
+ "resolved": "https://registry.npmjs.org/storybook/-/storybook-8.4.2.tgz",
+ "integrity": "sha512-GMCgyAulmLNrkUtDkCpFO4SB77YrpiIxq6e5tzaQdXEuaDu1mdNwOuP3VG7nE2FzxmqDvagSgriM68YW9iFaZA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@storybook/core": "8.4.2"
+ },
+ "bin": {
+ "getstorybook": "bin/index.cjs",
+ "sb": "bin/index.cjs",
+ "storybook": "bin/index.cjs"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/storybook"
+ },
+ "peerDependencies": {
+ "prettier": "^2 || ^3"
+ },
+ "peerDependenciesMeta": {
+ "prettier": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/strict-event-emitter": {
+ "version": "0.5.1",
+ "resolved": "https://registry.npmjs.org/strict-event-emitter/-/strict-event-emitter-0.5.1.tgz",
+ "integrity": "sha512-vMgjE/GGEPEFnhFub6pa4FmJBRBVOLpIII2hvCZ8Kzb7K0hlHo7mQv6xYrBvCL2LtAIBwFUK8wvuJgTVSQ5MFQ==",
+ "dev": true
+ },
+ "node_modules/string-width": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz",
+ "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==",
+ "dependencies": {
+ "eastasianwidth": "^0.2.0",
+ "emoji-regex": "^9.2.2",
+ "strip-ansi": "^7.0.1"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/string-width-cjs": {
+ "name": "string-width",
+ "version": "4.2.3",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
+ "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
+ "dependencies": {
+ "emoji-regex": "^8.0.0",
+ "is-fullwidth-code-point": "^3.0.0",
+ "strip-ansi": "^6.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/string-width-cjs/node_modules/ansi-regex": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
+ "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/string-width-cjs/node_modules/emoji-regex": {
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
+ "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="
+ },
+ "node_modules/string-width-cjs/node_modules/strip-ansi": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
+ "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
+ "dependencies": {
+ "ansi-regex": "^5.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/string.prototype.matchall": {
+ "version": "4.0.11",
+ "resolved": "https://registry.npmjs.org/string.prototype.matchall/-/string.prototype.matchall-4.0.11.tgz",
+ "integrity": "sha512-NUdh0aDavY2og7IbBPenWqR9exH+E26Sv8e0/eTe1tltDGZL+GtBkDAnnyBtmekfK6/Dq3MkcGtzXFEd1LQrtg==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.7",
+ "define-properties": "^1.2.1",
+ "es-abstract": "^1.23.2",
+ "es-errors": "^1.3.0",
+ "es-object-atoms": "^1.0.0",
+ "get-intrinsic": "^1.2.4",
+ "gopd": "^1.0.1",
+ "has-symbols": "^1.0.3",
+ "internal-slot": "^1.0.7",
+ "regexp.prototype.flags": "^1.5.2",
+ "set-function-name": "^2.0.2",
+ "side-channel": "^1.0.6"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/string.prototype.repeat": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/string.prototype.repeat/-/string.prototype.repeat-1.0.0.tgz",
+ "integrity": "sha512-0u/TldDbKD8bFCQ/4f5+mNRrXwZ8hg2w7ZR8wa16e8z9XpePWl3eGEcUD0OXpEH/VJH/2G3gjUtR3ZOiBe2S/w==",
+ "dev": true,
+ "dependencies": {
+ "define-properties": "^1.1.3",
+ "es-abstract": "^1.17.5"
+ }
+ },
+ "node_modules/string.prototype.trim": {
+ "version": "1.2.9",
+ "resolved": "https://registry.npmjs.org/string.prototype.trim/-/string.prototype.trim-1.2.9.tgz",
+ "integrity": "sha512-klHuCNxiMZ8MlsOihJhJEBJAiMVqU3Z2nEXWfWnIqjN0gEFS9J9+IxKozWWtQGcgoa1WUZzLjKPTr4ZHNFTFxw==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.7",
+ "define-properties": "^1.2.1",
+ "es-abstract": "^1.23.0",
+ "es-object-atoms": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/string.prototype.trimend": {
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.8.tgz",
+ "integrity": "sha512-p73uL5VCHCO2BZZ6krwwQE3kCzM7NKmis8S//xEC6fQonchbum4eP6kR4DLEjQFO3Wnj3Fuo8NM0kOSjVdHjZQ==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.7",
+ "define-properties": "^1.2.1",
+ "es-object-atoms": "^1.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/string.prototype.trimstart": {
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.8.tgz",
+ "integrity": "sha512-UXSH262CSZY1tfu3G3Secr6uGLCFVPMhIqHjlgCUtCCcgihYc/xKs9djMTMUOb2j1mVSeU8EU6NWc/iQKU6Gfg==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.7",
+ "define-properties": "^1.2.1",
+ "es-object-atoms": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/strip-ansi": {
+ "version": "7.1.0",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz",
+ "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==",
+ "dependencies": {
+ "ansi-regex": "^6.0.1"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/strip-ansi?sponsor=1"
+ }
+ },
+ "node_modules/strip-ansi-cjs": {
+ "name": "strip-ansi",
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
+ "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
+ "dependencies": {
+ "ansi-regex": "^5.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/strip-ansi-cjs/node_modules/ansi-regex": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
+ "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/strip-bom": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz",
+ "integrity": "sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/strip-indent": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/strip-indent/-/strip-indent-3.0.0.tgz",
+ "integrity": "sha512-laJTa3Jb+VQpaC6DseHhF7dXVqHTfJPCRDaEbid/drOhgitgYku/letMUqOXFoWV0zIIUbjpdH2t+tYj4bQMRQ==",
+ "dev": true,
+ "dependencies": {
+ "min-indent": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/strip-json-comments": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz",
+ "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/style-mod": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/style-mod/-/style-mod-4.1.2.tgz",
+ "integrity": "sha512-wnD1HyVqpJUI2+eKZ+eo1UwghftP6yuFheBqqe+bWCotBjC2K1YnteJILRMs3SM4V/0dLEW1SC27MWP5y+mwmw=="
+ },
+ "node_modules/sucrase": {
+ "version": "3.35.0",
+ "resolved": "https://registry.npmjs.org/sucrase/-/sucrase-3.35.0.tgz",
+ "integrity": "sha512-8EbVDiu9iN/nESwxeSxDKe0dunta1GOlHufmSSXxMD2z2/tMZpDMpvXQGsc+ajGo8y2uYUmixaSRUc/QPoQ0GA==",
+ "dependencies": {
+ "@jridgewell/gen-mapping": "^0.3.2",
+ "commander": "^4.0.0",
+ "glob": "^10.3.10",
+ "lines-and-columns": "^1.1.6",
+ "mz": "^2.7.0",
+ "pirates": "^4.0.1",
+ "ts-interface-checker": "^0.1.9"
+ },
+ "bin": {
+ "sucrase": "bin/sucrase",
+ "sucrase-node": "bin/sucrase-node"
+ },
+ "engines": {
+ "node": ">=16 || 14 >=14.17"
+ }
+ },
+ "node_modules/supports-color": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
+ "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
+ "dev": true,
+ "dependencies": {
+ "has-flag": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/supports-preserve-symlinks-flag": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz",
+ "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/symbol-tree": {
+ "version": "3.2.4",
+ "resolved": "https://registry.npmjs.org/symbol-tree/-/symbol-tree-3.2.4.tgz",
+ "integrity": "sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==",
+ "dev": true
+ },
+ "node_modules/tailwind-merge": {
+ "version": "2.5.4",
+ "resolved": "https://registry.npmjs.org/tailwind-merge/-/tailwind-merge-2.5.4.tgz",
+ "integrity": "sha512-0q8cfZHMu9nuYP/b5Shb7Y7Sh1B7Nnl5GqNr1U+n2p6+mybvRtayrQ+0042Z5byvTA8ihjlP8Odo8/VnHbZu4Q==",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/dcastil"
+ }
+ },
+ "node_modules/tailwindcss": {
+ "version": "3.4.14",
+ "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.14.tgz",
+ "integrity": "sha512-IcSvOcTRcUtQQ7ILQL5quRDg7Xs93PdJEk1ZLbhhvJc7uj/OAhYOnruEiwnGgBvUtaUAJ8/mhSw1o8L2jCiENA==",
+ "dependencies": {
+ "@alloc/quick-lru": "^5.2.0",
+ "arg": "^5.0.2",
+ "chokidar": "^3.5.3",
+ "didyoumean": "^1.2.2",
+ "dlv": "^1.1.3",
+ "fast-glob": "^3.3.0",
+ "glob-parent": "^6.0.2",
+ "is-glob": "^4.0.3",
+ "jiti": "^1.21.0",
+ "lilconfig": "^2.1.0",
+ "micromatch": "^4.0.5",
+ "normalize-path": "^3.0.0",
+ "object-hash": "^3.0.0",
+ "picocolors": "^1.0.0",
+ "postcss": "^8.4.23",
+ "postcss-import": "^15.1.0",
+ "postcss-js": "^4.0.1",
+ "postcss-load-config": "^4.0.1",
+ "postcss-nested": "^6.0.1",
+ "postcss-selector-parser": "^6.0.11",
+ "resolve": "^1.22.2",
+ "sucrase": "^3.32.0"
+ },
+ "bin": {
+ "tailwind": "lib/cli.js",
+ "tailwindcss": "lib/cli.js"
+ },
+ "engines": {
+ "node": ">=14.0.0"
+ }
+ },
+ "node_modules/tailwindcss-animate": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/tailwindcss-animate/-/tailwindcss-animate-1.0.7.tgz",
+ "integrity": "sha512-bl6mpH3T7I3UFxuvDEXLxy/VuFxBk5bbzplh7tXI68mwMokNYd1t9qPBHlnyTwfa4JGC4zP516I1hYYtQ/vspA==",
+ "peerDependencies": {
+ "tailwindcss": ">=3.0.0 || insiders"
+ }
+ },
+ "node_modules/tailwindcss/node_modules/glob-parent": {
+ "version": "6.0.2",
+ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz",
+ "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==",
+ "dependencies": {
+ "is-glob": "^4.0.3"
+ },
+ "engines": {
+ "node": ">=10.13.0"
+ }
+ },
+ "node_modules/tailwindcss/node_modules/resolve": {
+ "version": "1.22.8",
+ "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.8.tgz",
+ "integrity": "sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==",
+ "dependencies": {
+ "is-core-module": "^2.13.0",
+ "path-parse": "^1.0.7",
+ "supports-preserve-symlinks-flag": "^1.0.0"
+ },
+ "bin": {
+ "resolve": "bin/resolve"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/text-table": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz",
+ "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==",
+ "dev": true
+ },
+ "node_modules/thenify": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/thenify/-/thenify-3.3.1.tgz",
+ "integrity": "sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==",
+ "dependencies": {
+ "any-promise": "^1.0.0"
+ }
+ },
+ "node_modules/thenify-all": {
+ "version": "1.6.0",
+ "resolved": "https://registry.npmjs.org/thenify-all/-/thenify-all-1.6.0.tgz",
+ "integrity": "sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==",
+ "dependencies": {
+ "thenify": ">= 3.1.0 < 4"
+ },
+ "engines": {
+ "node": ">=0.8"
+ }
+ },
+ "node_modules/tiny-invariant": {
+ "version": "1.3.3",
+ "resolved": "https://registry.npmjs.org/tiny-invariant/-/tiny-invariant-1.3.3.tgz",
+ "integrity": "sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg=="
+ },
+ "node_modules/tiny-warning": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/tiny-warning/-/tiny-warning-1.0.3.tgz",
+ "integrity": "sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA==",
+ "peer": true
+ },
+ "node_modules/tinybench": {
+ "version": "2.9.0",
+ "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.9.0.tgz",
+ "integrity": "sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==",
+ "dev": true
+ },
+ "node_modules/tinyexec": {
+ "version": "0.3.1",
+ "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-0.3.1.tgz",
+ "integrity": "sha512-WiCJLEECkO18gwqIp6+hJg0//p23HXp4S+gGtAKu3mI2F2/sXC4FvHvXvB0zJVVaTPhx1/tOwdbRsa1sOBIKqQ==",
+ "dev": true
+ },
+ "node_modules/tinypool": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/tinypool/-/tinypool-1.0.1.tgz",
+ "integrity": "sha512-URZYihUbRPcGv95En+sz6MfghfIc2OJ1sv/RmhWZLouPY0/8Vo80viwPvg3dlaS9fuq7fQMEfgRRK7BBZThBEA==",
+ "dev": true,
+ "engines": {
+ "node": "^18.0.0 || >=20.0.0"
+ }
+ },
+ "node_modules/tinyrainbow": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-1.2.0.tgz",
+ "integrity": "sha512-weEDEq7Z5eTHPDh4xjX789+fHfF+P8boiFB+0vbWzpbnbsEr/GRaohi/uMKxg8RZMXnl1ItAi/IUHWMsjDV7kQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=14.0.0"
+ }
+ },
+ "node_modules/tinyspy": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-3.0.2.tgz",
+ "integrity": "sha512-n1cw8k1k0x4pgA2+9XrOkFydTerNcJ1zWCO5Nn9scWHTD+5tp8dghT2x1uduQePZTZgd3Tupf+x9BxJjeJi77Q==",
+ "dev": true,
+ "engines": {
+ "node": ">=14.0.0"
+ }
+ },
+ "node_modules/tldts": {
+ "version": "6.1.58",
+ "resolved": "https://registry.npmjs.org/tldts/-/tldts-6.1.58.tgz",
+ "integrity": "sha512-MQJrJhjHOYGYb8DobR6Y4AdDbd4TYkyQ+KBDVc5ODzs1cbrvPpfN1IemYi9jfipJ/vR1YWvrDli0hg1y19VRoA==",
+ "dev": true,
+ "dependencies": {
+ "tldts-core": "^6.1.58"
+ },
+ "bin": {
+ "tldts": "bin/cli.js"
+ }
+ },
+ "node_modules/tldts-core": {
+ "version": "6.1.58",
+ "resolved": "https://registry.npmjs.org/tldts-core/-/tldts-core-6.1.58.tgz",
+ "integrity": "sha512-dR936xmhBm7AeqHIhCWwK765gZ7dFyL+IqLSFAjJbFlUXGMLCb8i2PzlzaOuWBuplBTaBYseSb565nk/ZEM0Bg==",
+ "dev": true
+ },
+ "node_modules/to-regex-range": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
+ "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
+ "dependencies": {
+ "is-number": "^7.0.0"
+ },
+ "engines": {
+ "node": ">=8.0"
+ }
+ },
+ "node_modules/tough-cookie": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-5.0.0.tgz",
+ "integrity": "sha512-FRKsF7cz96xIIeMZ82ehjC3xW2E+O2+v11udrDYewUbszngYhsGa8z6YUMMzO9QJZzzyd0nGGXnML/TReX6W8Q==",
+ "dev": true,
+ "dependencies": {
+ "tldts": "^6.1.32"
+ },
+ "engines": {
+ "node": ">=16"
+ }
+ },
+ "node_modules/tr46": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/tr46/-/tr46-5.0.0.tgz",
+ "integrity": "sha512-tk2G5R2KRwBd+ZN0zaEXpmzdKyOYksXwywulIX95MBODjSzMIuQnQ3m8JxgbhnL1LeVo7lqQKsYa1O3Htl7K5g==",
+ "dev": true,
+ "dependencies": {
+ "punycode": "^2.3.1"
+ },
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/ts-api-utils": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-1.4.0.tgz",
+ "integrity": "sha512-032cPxaEKwM+GT3vA5JXNzIaizx388rhsSW79vGRNGXfRRAdEAn2mvk36PvK5HnOchyWZ7afLEXqYCvPCrzuzQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=16"
+ },
+ "peerDependencies": {
+ "typescript": ">=4.2.0"
+ }
+ },
+ "node_modules/ts-dedent": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/ts-dedent/-/ts-dedent-2.2.0.tgz",
+ "integrity": "sha512-q5W7tVM71e2xjHZTlgfTDoPF/SmqKG5hddq9SzR49CH2hayqRKJtQ4mtRlSxKaJlR/+9rEM+mnBHf7I2/BQcpQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.10"
+ }
+ },
+ "node_modules/ts-interface-checker": {
+ "version": "0.1.13",
+ "resolved": "https://registry.npmjs.org/ts-interface-checker/-/ts-interface-checker-0.1.13.tgz",
+ "integrity": "sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA=="
+ },
+ "node_modules/tsconfig-paths": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-4.2.0.tgz",
+ "integrity": "sha512-NoZ4roiN7LnbKn9QqE1amc9DJfzvZXxF4xDavcOWt1BPkdx+m+0gJuPM+S0vCe7zTJMYUP0R8pO2XMr+Y8oLIg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "json5": "^2.2.2",
+ "minimist": "^1.2.6",
+ "strip-bom": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/tslib": {
+ "version": "2.8.1",
+ "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz",
+ "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="
+ },
+ "node_modules/tsutils": {
+ "version": "3.21.0",
+ "resolved": "https://registry.npmjs.org/tsutils/-/tsutils-3.21.0.tgz",
+ "integrity": "sha512-mHKK3iUXL+3UF6xL5k0PEhKRUBKPBCv/+RkEOpjRWxxx27KKRBmmA60A9pgOUvMi8GKhRMPEmjBRPzs2W7O1OA==",
+ "dev": true,
+ "dependencies": {
+ "tslib": "^1.8.1"
+ },
+ "engines": {
+ "node": ">= 6"
+ },
+ "peerDependencies": {
+ "typescript": ">=2.8.0 || >= 3.2.0-dev || >= 3.3.0-dev || >= 3.4.0-dev || >= 3.5.0-dev || >= 3.6.0-dev || >= 3.6.0-beta || >= 3.7.0-dev || >= 3.7.0-beta"
+ }
+ },
+ "node_modules/tsutils/node_modules/tslib": {
+ "version": "1.14.1",
+ "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz",
+ "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==",
+ "dev": true
+ },
+ "node_modules/tsx": {
+ "version": "4.19.2",
+ "resolved": "https://registry.npmjs.org/tsx/-/tsx-4.19.2.tgz",
+ "integrity": "sha512-pOUl6Vo2LUq/bSa8S5q7b91cgNSjctn9ugq/+Mvow99qW6x/UZYwzxy/3NmqoT66eHYfCVvFvACC58UBPFf28g==",
+ "devOptional": true,
+ "dependencies": {
+ "esbuild": "~0.23.0",
+ "get-tsconfig": "^4.7.5"
+ },
+ "bin": {
+ "tsx": "dist/cli.mjs"
+ },
+ "engines": {
+ "node": ">=18.0.0"
+ },
+ "optionalDependencies": {
+ "fsevents": "~2.3.3"
+ }
+ },
+ "node_modules/type-check": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz",
+ "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==",
+ "dev": true,
+ "dependencies": {
+ "prelude-ls": "^1.2.1"
+ },
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/type-fest": {
+ "version": "4.26.1",
+ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.26.1.tgz",
+ "integrity": "sha512-yOGpmOAL7CkKe/91I5O3gPICmJNLJ1G4zFYVAsRHg7M64biSnPtRj0WNQt++bRkjYOqjWXrhnUw1utzmVErAdg==",
+ "dev": true,
+ "engines": {
+ "node": ">=16"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/typed-array-buffer": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/typed-array-buffer/-/typed-array-buffer-1.0.2.tgz",
+ "integrity": "sha512-gEymJYKZtKXzzBzM4jqa9w6Q1Jjm7x2d+sh19AdsD4wqnMPDYyvwpsIc2Q/835kHuo3BEQ7CjelGhfTsoBb2MQ==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.7",
+ "es-errors": "^1.3.0",
+ "is-typed-array": "^1.1.13"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/typed-array-byte-length": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/typed-array-byte-length/-/typed-array-byte-length-1.0.1.tgz",
+ "integrity": "sha512-3iMJ9q0ao7WE9tWcaYKIptkNBuOIcZCCT0d4MRvuuH88fEoEH62IuQe0OtraD3ebQEoTRk8XCBoknUNc1Y67pw==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.7",
+ "for-each": "^0.3.3",
+ "gopd": "^1.0.1",
+ "has-proto": "^1.0.3",
+ "is-typed-array": "^1.1.13"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/typed-array-byte-offset": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/typed-array-byte-offset/-/typed-array-byte-offset-1.0.2.tgz",
+ "integrity": "sha512-Ous0vodHa56FviZucS2E63zkgtgrACj7omjwd/8lTEMEPFFyjfixMZ1ZXenpgCFBBt4EC1J2XsyVS2gkG0eTFA==",
+ "dev": true,
+ "dependencies": {
+ "available-typed-arrays": "^1.0.7",
+ "call-bind": "^1.0.7",
+ "for-each": "^0.3.3",
+ "gopd": "^1.0.1",
+ "has-proto": "^1.0.3",
+ "is-typed-array": "^1.1.13"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/typed-array-length": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/typed-array-length/-/typed-array-length-1.0.6.tgz",
+ "integrity": "sha512-/OxDN6OtAk5KBpGb28T+HZc2M+ADtvRxXrKKbUwtsLgdoxgX13hyy7ek6bFRl5+aBs2yZzB0c4CnQfAtVypW/g==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.7",
+ "for-each": "^0.3.3",
+ "gopd": "^1.0.1",
+ "has-proto": "^1.0.3",
+ "is-typed-array": "^1.1.13",
+ "possible-typed-array-names": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/typescript": {
+ "version": "5.6.3",
+ "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.6.3.tgz",
+ "integrity": "sha512-hjcS1mhfuyi4WW8IWtjP7brDrG2cuDZukyrYrSauoXGNgx0S7zceP07adYkJycEr56BOUTNPzbInooiN3fn1qw==",
+ "dev": true,
+ "bin": {
+ "tsc": "bin/tsc",
+ "tsserver": "bin/tsserver"
+ },
+ "engines": {
+ "node": ">=14.17"
+ }
+ },
+ "node_modules/typescript-eslint": {
+ "version": "8.12.2",
+ "resolved": "https://registry.npmjs.org/typescript-eslint/-/typescript-eslint-8.12.2.tgz",
+ "integrity": "sha512-UbuVUWSrHVR03q9CWx+JDHeO6B/Hr9p4U5lRH++5tq/EbFq1faYZe50ZSBePptgfIKLEti0aPQ3hFgnPVcd8ZQ==",
+ "dev": true,
+ "dependencies": {
+ "@typescript-eslint/eslint-plugin": "8.12.2",
+ "@typescript-eslint/parser": "8.12.2",
+ "@typescript-eslint/utils": "8.12.2"
+ },
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ },
+ "peerDependenciesMeta": {
+ "typescript": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/unbox-primitive": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/unbox-primitive/-/unbox-primitive-1.0.2.tgz",
+ "integrity": "sha512-61pPlCD9h51VoreyJ0BReideM3MDKMKnh6+V9L08331ipq6Q8OFXZYiqP6n/tbHx4s5I9uRhcye6BrbkizkBDw==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "has-bigints": "^1.0.2",
+ "has-symbols": "^1.0.3",
+ "which-boxed-primitive": "^1.0.2"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/undici-types": {
+ "version": "6.19.8",
+ "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.19.8.tgz",
+ "integrity": "sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw==",
+ "dev": true
+ },
+ "node_modules/universalify": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.2.0.tgz",
+ "integrity": "sha512-CJ1QgKmNg3CwvAv/kOFmtnEN05f0D/cn9QntgNOQlQF9dgvVTHj3t+8JPdjqawCHk7V/KA+fbUqzZ9XWhcqPUg==",
+ "dev": true,
+ "engines": {
+ "node": ">= 4.0.0"
+ }
+ },
+ "node_modules/unplugin": {
+ "version": "1.15.0",
+ "resolved": "https://registry.npmjs.org/unplugin/-/unplugin-1.15.0.tgz",
+ "integrity": "sha512-jTPIs63W+DUEDW207ztbaoO7cQ4p5aVaB823LSlxpsFEU3Mykwxf3ZGC/wzxFJeZlASZYgVrWeo7LgOrqJZ8RA==",
+ "dev": true,
+ "dependencies": {
+ "acorn": "^8.14.0",
+ "webpack-virtual-modules": "^0.6.2"
+ },
+ "engines": {
+ "node": ">=14.0.0"
+ },
+ "peerDependencies": {
+ "webpack-sources": "^3"
+ },
+ "peerDependenciesMeta": {
+ "webpack-sources": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/update-browserslist-db": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.1.tgz",
+ "integrity": "sha512-R8UzCaa9Az+38REPiJ1tXlImTJXlVfgHZsglwBD/k6nj76ctsH1E3q4doGrukiLQd3sGQYu56r5+lo5r94l29A==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/browserslist"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "dependencies": {
+ "escalade": "^3.2.0",
+ "picocolors": "^1.1.0"
+ },
+ "bin": {
+ "update-browserslist-db": "cli.js"
+ },
+ "peerDependencies": {
+ "browserslist": ">= 4.21.0"
+ }
+ },
+ "node_modules/uri-js": {
+ "version": "4.4.1",
+ "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz",
+ "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==",
+ "dev": true,
+ "dependencies": {
+ "punycode": "^2.1.0"
+ }
+ },
+ "node_modules/url-parse": {
+ "version": "1.5.10",
+ "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.10.tgz",
+ "integrity": "sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==",
+ "dev": true,
+ "dependencies": {
+ "querystringify": "^2.1.1",
+ "requires-port": "^1.0.0"
+ }
+ },
+ "node_modules/use-callback-ref": {
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/use-callback-ref/-/use-callback-ref-1.3.2.tgz",
+ "integrity": "sha512-elOQwe6Q8gqZgDA8mrh44qRTQqpIHDcZ3hXTLjBe1i4ph8XpNJnO+aQf3NaG+lriLopI4HMx9VjQLfPQ6vhnoA==",
+ "dependencies": {
+ "tslib": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "peerDependencies": {
+ "@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0",
+ "react": "^16.8.0 || ^17.0.0 || ^18.0.0"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/use-sidecar": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/use-sidecar/-/use-sidecar-1.1.2.tgz",
+ "integrity": "sha512-epTbsLuzZ7lPClpz2TyryBfztm7m+28DlEv2ZCQ3MDr5ssiwyOwGH/e5F9CkfWjJ1t4clvI58yF822/GUkjjhw==",
+ "dependencies": {
+ "detect-node-es": "^1.1.0",
+ "tslib": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "peerDependencies": {
+ "@types/react": "^16.9.0 || ^17.0.0 || ^18.0.0",
+ "react": "^16.8.0 || ^17.0.0 || ^18.0.0"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/use-sync-external-store": {
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.2.2.tgz",
+ "integrity": "sha512-PElTlVMwpblvbNqQ82d2n6RjStvdSoNe9FG28kNfz3WiXilJm4DdNkEzRhCZuIDwY8U08WVihhGR5iRqAwfDiw==",
+ "peer": true,
+ "peerDependencies": {
+ "react": "^16.8.0 || ^17.0.0 || ^18.0.0"
+ }
+ },
+ "node_modules/util": {
+ "version": "0.12.5",
+ "resolved": "https://registry.npmjs.org/util/-/util-0.12.5.tgz",
+ "integrity": "sha512-kZf/K6hEIrWHI6XqOFUiiMa+79wE/D8Q+NCNAWclkyg3b4d2k7s0QGepNjiABc+aR3N1PAyHL7p6UcLY6LmrnA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "inherits": "^2.0.3",
+ "is-arguments": "^1.0.4",
+ "is-generator-function": "^1.0.7",
+ "is-typed-array": "^1.1.3",
+ "which-typed-array": "^1.1.2"
+ }
+ },
+ "node_modules/util-deprecate": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
+ "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw=="
+ },
+ "node_modules/uuid": {
+ "version": "9.0.1",
+ "resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.1.tgz",
+ "integrity": "sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==",
+ "dev": true,
+ "funding": [
+ "https://github.com/sponsors/broofa",
+ "https://github.com/sponsors/ctavan"
+ ],
+ "license": "MIT",
+ "bin": {
+ "uuid": "dist/bin/uuid"
+ }
+ },
+ "node_modules/victory-vendor": {
+ "version": "36.9.2",
+ "resolved": "https://registry.npmjs.org/victory-vendor/-/victory-vendor-36.9.2.tgz",
+ "integrity": "sha512-PnpQQMuxlwYdocC8fIJqVXvkeViHYzotI+NJrCuav0ZYFoq912ZHBk3mCeuj+5/VpodOjPe1z0Fk2ihgzlXqjQ==",
+ "dependencies": {
+ "@types/d3-array": "^3.0.3",
+ "@types/d3-ease": "^3.0.0",
+ "@types/d3-interpolate": "^3.0.1",
+ "@types/d3-scale": "^4.0.2",
+ "@types/d3-shape": "^3.1.0",
+ "@types/d3-time": "^3.0.0",
+ "@types/d3-timer": "^3.0.0",
+ "d3-array": "^3.1.6",
+ "d3-ease": "^3.0.1",
+ "d3-interpolate": "^3.0.1",
+ "d3-scale": "^4.0.2",
+ "d3-shape": "^3.1.0",
+ "d3-time": "^3.0.0",
+ "d3-timer": "^3.0.1"
+ }
+ },
+ "node_modules/vite": {
+ "version": "5.4.10",
+ "resolved": "https://registry.npmjs.org/vite/-/vite-5.4.10.tgz",
+ "integrity": "sha512-1hvaPshuPUtxeQ0hsVH3Mud0ZanOLwVTneA1EgbAM5LhaZEqyPWGRQ7BtaMvUrTDeEaC8pxtj6a6jku3x4z6SQ==",
+ "dev": true,
+ "dependencies": {
+ "esbuild": "^0.21.3",
+ "postcss": "^8.4.43",
+ "rollup": "^4.20.0"
+ },
+ "bin": {
+ "vite": "bin/vite.js"
+ },
+ "engines": {
+ "node": "^18.0.0 || >=20.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/vitejs/vite?sponsor=1"
+ },
+ "optionalDependencies": {
+ "fsevents": "~2.3.3"
+ },
+ "peerDependencies": {
+ "@types/node": "^18.0.0 || >=20.0.0",
+ "less": "*",
+ "lightningcss": "^1.21.0",
+ "sass": "*",
+ "sass-embedded": "*",
+ "stylus": "*",
+ "sugarss": "*",
+ "terser": "^5.4.0"
+ },
+ "peerDependenciesMeta": {
+ "@types/node": {
+ "optional": true
+ },
+ "less": {
+ "optional": true
+ },
+ "lightningcss": {
+ "optional": true
+ },
+ "sass": {
+ "optional": true
+ },
+ "sass-embedded": {
+ "optional": true
+ },
+ "stylus": {
+ "optional": true
+ },
+ "sugarss": {
+ "optional": true
+ },
+ "terser": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/vite-node": {
+ "version": "2.1.4",
+ "resolved": "https://registry.npmjs.org/vite-node/-/vite-node-2.1.4.tgz",
+ "integrity": "sha512-kqa9v+oi4HwkG6g8ufRnb5AeplcRw8jUF6/7/Qz1qRQOXHImG8YnLbB+LLszENwFnoBl9xIf9nVdCFzNd7GQEg==",
+ "dev": true,
+ "dependencies": {
+ "cac": "^6.7.14",
+ "debug": "^4.3.7",
+ "pathe": "^1.1.2",
+ "vite": "^5.0.0"
+ },
+ "bin": {
+ "vite-node": "vite-node.mjs"
+ },
+ "engines": {
+ "node": "^18.0.0 || >=20.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/vitest"
+ }
+ },
+ "node_modules/vite/node_modules/@esbuild/aix-ppc64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.21.5.tgz",
+ "integrity": "sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==",
+ "cpu": [
+ "ppc64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "aix"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/vite/node_modules/@esbuild/android-arm": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.21.5.tgz",
+ "integrity": "sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/vite/node_modules/@esbuild/android-arm64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.21.5.tgz",
+ "integrity": "sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/vite/node_modules/@esbuild/android-x64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.21.5.tgz",
+ "integrity": "sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/vite/node_modules/@esbuild/darwin-arm64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.21.5.tgz",
+ "integrity": "sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/vite/node_modules/@esbuild/darwin-x64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.21.5.tgz",
+ "integrity": "sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/vite/node_modules/@esbuild/freebsd-arm64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.21.5.tgz",
+ "integrity": "sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "freebsd"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/vite/node_modules/@esbuild/freebsd-x64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.21.5.tgz",
+ "integrity": "sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "freebsd"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/vite/node_modules/@esbuild/linux-arm": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.21.5.tgz",
+ "integrity": "sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/vite/node_modules/@esbuild/linux-arm64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.21.5.tgz",
+ "integrity": "sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/vite/node_modules/@esbuild/linux-ia32": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.21.5.tgz",
+ "integrity": "sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/vite/node_modules/@esbuild/linux-loong64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.21.5.tgz",
+ "integrity": "sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==",
+ "cpu": [
+ "loong64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/vite/node_modules/@esbuild/linux-mips64el": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.21.5.tgz",
+ "integrity": "sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==",
+ "cpu": [
+ "mips64el"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/vite/node_modules/@esbuild/linux-ppc64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.21.5.tgz",
+ "integrity": "sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==",
+ "cpu": [
+ "ppc64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/vite/node_modules/@esbuild/linux-riscv64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.21.5.tgz",
+ "integrity": "sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==",
+ "cpu": [
+ "riscv64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/vite/node_modules/@esbuild/linux-s390x": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.21.5.tgz",
+ "integrity": "sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==",
+ "cpu": [
+ "s390x"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/vite/node_modules/@esbuild/linux-x64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.21.5.tgz",
+ "integrity": "sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/vite/node_modules/@esbuild/netbsd-x64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.21.5.tgz",
+ "integrity": "sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "netbsd"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/vite/node_modules/@esbuild/openbsd-x64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.21.5.tgz",
+ "integrity": "sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "openbsd"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/vite/node_modules/@esbuild/sunos-x64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.21.5.tgz",
+ "integrity": "sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "sunos"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/vite/node_modules/@esbuild/win32-arm64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.21.5.tgz",
+ "integrity": "sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/vite/node_modules/@esbuild/win32-ia32": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.21.5.tgz",
+ "integrity": "sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/vite/node_modules/@esbuild/win32-x64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.21.5.tgz",
+ "integrity": "sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/vite/node_modules/esbuild": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.21.5.tgz",
+ "integrity": "sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==",
+ "dev": true,
+ "hasInstallScript": true,
+ "bin": {
+ "esbuild": "bin/esbuild"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "optionalDependencies": {
+ "@esbuild/aix-ppc64": "0.21.5",
+ "@esbuild/android-arm": "0.21.5",
+ "@esbuild/android-arm64": "0.21.5",
+ "@esbuild/android-x64": "0.21.5",
+ "@esbuild/darwin-arm64": "0.21.5",
+ "@esbuild/darwin-x64": "0.21.5",
+ "@esbuild/freebsd-arm64": "0.21.5",
+ "@esbuild/freebsd-x64": "0.21.5",
+ "@esbuild/linux-arm": "0.21.5",
+ "@esbuild/linux-arm64": "0.21.5",
+ "@esbuild/linux-ia32": "0.21.5",
+ "@esbuild/linux-loong64": "0.21.5",
+ "@esbuild/linux-mips64el": "0.21.5",
+ "@esbuild/linux-ppc64": "0.21.5",
+ "@esbuild/linux-riscv64": "0.21.5",
+ "@esbuild/linux-s390x": "0.21.5",
+ "@esbuild/linux-x64": "0.21.5",
+ "@esbuild/netbsd-x64": "0.21.5",
+ "@esbuild/openbsd-x64": "0.21.5",
+ "@esbuild/sunos-x64": "0.21.5",
+ "@esbuild/win32-arm64": "0.21.5",
+ "@esbuild/win32-ia32": "0.21.5",
+ "@esbuild/win32-x64": "0.21.5"
+ }
+ },
+ "node_modules/vitest": {
+ "version": "2.1.4",
+ "resolved": "https://registry.npmjs.org/vitest/-/vitest-2.1.4.tgz",
+ "integrity": "sha512-eDjxbVAJw1UJJCHr5xr/xM86Zx+YxIEXGAR+bmnEID7z9qWfoxpHw0zdobz+TQAFOLT+nEXz3+gx6nUJ7RgmlQ==",
+ "dev": true,
+ "dependencies": {
+ "@vitest/expect": "2.1.4",
+ "@vitest/mocker": "2.1.4",
+ "@vitest/pretty-format": "^2.1.4",
+ "@vitest/runner": "2.1.4",
+ "@vitest/snapshot": "2.1.4",
+ "@vitest/spy": "2.1.4",
+ "@vitest/utils": "2.1.4",
+ "chai": "^5.1.2",
+ "debug": "^4.3.7",
+ "expect-type": "^1.1.0",
+ "magic-string": "^0.30.12",
+ "pathe": "^1.1.2",
+ "std-env": "^3.7.0",
+ "tinybench": "^2.9.0",
+ "tinyexec": "^0.3.1",
+ "tinypool": "^1.0.1",
+ "tinyrainbow": "^1.2.0",
+ "vite": "^5.0.0",
+ "vite-node": "2.1.4",
+ "why-is-node-running": "^2.3.0"
+ },
+ "bin": {
+ "vitest": "vitest.mjs"
+ },
+ "engines": {
+ "node": "^18.0.0 || >=20.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/vitest"
+ },
+ "peerDependencies": {
+ "@edge-runtime/vm": "*",
+ "@types/node": "^18.0.0 || >=20.0.0",
+ "@vitest/browser": "2.1.4",
+ "@vitest/ui": "2.1.4",
+ "happy-dom": "*",
+ "jsdom": "*"
+ },
+ "peerDependenciesMeta": {
+ "@edge-runtime/vm": {
+ "optional": true
+ },
+ "@types/node": {
+ "optional": true
+ },
+ "@vitest/browser": {
+ "optional": true
+ },
+ "@vitest/ui": {
+ "optional": true
+ },
+ "happy-dom": {
+ "optional": true
+ },
+ "jsdom": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/w3c-keyname": {
+ "version": "2.2.8",
+ "resolved": "https://registry.npmjs.org/w3c-keyname/-/w3c-keyname-2.2.8.tgz",
+ "integrity": "sha512-dpojBhNsCNN7T82Tm7k26A6G9ML3NkhDsnw9n/eoxSRlVBB4CEtIQ/KTCLI2Fwf3ataSXRhYFkQi3SlnFwPvPQ=="
+ },
+ "node_modules/w3c-xmlserializer": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/w3c-xmlserializer/-/w3c-xmlserializer-5.0.0.tgz",
+ "integrity": "sha512-o8qghlI8NZHU1lLPrpi2+Uq7abh4GGPpYANlalzWxyWteJOCsr/P+oPBA49TOLu5FTZO4d3F9MnWJfiMo4BkmA==",
+ "dev": true,
+ "dependencies": {
+ "xml-name-validator": "^5.0.0"
+ },
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/webidl-conversions": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-7.0.0.tgz",
+ "integrity": "sha512-VwddBukDzu71offAQR975unBIGqfKZpM+8ZX6ySk8nYhVoo5CYaZyzt3YBvYtRtO+aoGlqxPg/B87NGVZ/fu6g==",
+ "dev": true,
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/webpack-virtual-modules": {
+ "version": "0.6.2",
+ "resolved": "https://registry.npmjs.org/webpack-virtual-modules/-/webpack-virtual-modules-0.6.2.tgz",
+ "integrity": "sha512-66/V2i5hQanC51vBQKPH4aI8NMAcBW59FVBs+rC7eGHupMyfn34q7rZIE+ETlJ+XTevqfUhVVBgSUNSW2flEUQ==",
+ "dev": true
+ },
+ "node_modules/whatwg-encoding": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/whatwg-encoding/-/whatwg-encoding-3.1.1.tgz",
+ "integrity": "sha512-6qN4hJdMwfYBtE3YBTTHhoeuUrDBPZmbQaxWAqSALV/MeEnR5z1xd8UKud2RAkFoPkmB+hli1TZSnyi84xz1vQ==",
+ "dev": true,
+ "dependencies": {
+ "iconv-lite": "0.6.3"
+ },
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/whatwg-mimetype": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/whatwg-mimetype/-/whatwg-mimetype-4.0.0.tgz",
+ "integrity": "sha512-QaKxh0eNIi2mE9p2vEdzfagOKHCcj1pJ56EEHGQOVxp8r9/iszLUUV7v89x9O1p/T+NlTM5W7jW6+cz4Fq1YVg==",
+ "dev": true,
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/whatwg-url": {
+ "version": "14.0.0",
+ "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-14.0.0.tgz",
+ "integrity": "sha512-1lfMEm2IEr7RIV+f4lUNPOqfFL+pO+Xw3fJSqmjX9AbXcXcYOkCe1P6+9VBZB6n94af16NfZf+sSk0JCBZC9aw==",
+ "dev": true,
+ "dependencies": {
+ "tr46": "^5.0.0",
+ "webidl-conversions": "^7.0.0"
+ },
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/which": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
+ "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==",
+ "dependencies": {
+ "isexe": "^2.0.0"
+ },
+ "bin": {
+ "node-which": "bin/node-which"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/which-boxed-primitive": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/which-boxed-primitive/-/which-boxed-primitive-1.0.2.tgz",
+ "integrity": "sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg==",
+ "dev": true,
+ "dependencies": {
+ "is-bigint": "^1.0.1",
+ "is-boolean-object": "^1.1.0",
+ "is-number-object": "^1.0.4",
+ "is-string": "^1.0.5",
+ "is-symbol": "^1.0.3"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/which-builtin-type": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/which-builtin-type/-/which-builtin-type-1.1.4.tgz",
+ "integrity": "sha512-bppkmBSsHFmIMSl8BO9TbsyzsvGjVoppt8xUiGzwiu/bhDCGxnpOKCxgqj6GuyHE0mINMDecBFPlOm2hzY084w==",
+ "dev": true,
+ "dependencies": {
+ "function.prototype.name": "^1.1.6",
+ "has-tostringtag": "^1.0.2",
+ "is-async-function": "^2.0.0",
+ "is-date-object": "^1.0.5",
+ "is-finalizationregistry": "^1.0.2",
+ "is-generator-function": "^1.0.10",
+ "is-regex": "^1.1.4",
+ "is-weakref": "^1.0.2",
+ "isarray": "^2.0.5",
+ "which-boxed-primitive": "^1.0.2",
+ "which-collection": "^1.0.2",
+ "which-typed-array": "^1.1.15"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/which-collection": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/which-collection/-/which-collection-1.0.2.tgz",
+ "integrity": "sha512-K4jVyjnBdgvc86Y6BkaLZEN933SwYOuBFkdmBu9ZfkcAbdVbpITnDmjvZ/aQjRXQrv5EPkTnD1s39GiiqbngCw==",
+ "dev": true,
+ "dependencies": {
+ "is-map": "^2.0.3",
+ "is-set": "^2.0.3",
+ "is-weakmap": "^2.0.2",
+ "is-weakset": "^2.0.3"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/which-typed-array": {
+ "version": "1.1.15",
+ "resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.15.tgz",
+ "integrity": "sha512-oV0jmFtUky6CXfkqehVvBP/LSWJ2sy4vWMioiENyJLePrBO/yKyV9OyJySfAKosh+RYkIl5zJCNZ8/4JncrpdA==",
+ "dev": true,
+ "dependencies": {
+ "available-typed-arrays": "^1.0.7",
+ "call-bind": "^1.0.7",
+ "for-each": "^0.3.3",
+ "gopd": "^1.0.1",
+ "has-tostringtag": "^1.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/why-is-node-running": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.3.0.tgz",
+ "integrity": "sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==",
+ "dev": true,
+ "dependencies": {
+ "siginfo": "^2.0.0",
+ "stackback": "0.0.2"
+ },
+ "bin": {
+ "why-is-node-running": "cli.js"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/word-wrap": {
+ "version": "1.2.5",
+ "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz",
+ "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/wrap-ansi": {
+ "version": "8.1.0",
+ "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz",
+ "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==",
+ "dependencies": {
+ "ansi-styles": "^6.1.0",
+ "string-width": "^5.0.1",
+ "strip-ansi": "^7.0.1"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/wrap-ansi?sponsor=1"
+ }
+ },
+ "node_modules/wrap-ansi-cjs": {
+ "name": "wrap-ansi",
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
+ "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==",
+ "dependencies": {
+ "ansi-styles": "^4.0.0",
+ "string-width": "^4.1.0",
+ "strip-ansi": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/wrap-ansi?sponsor=1"
+ }
+ },
+ "node_modules/wrap-ansi-cjs/node_modules/ansi-regex": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
+ "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/wrap-ansi-cjs/node_modules/emoji-regex": {
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
+ "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="
+ },
+ "node_modules/wrap-ansi-cjs/node_modules/string-width": {
+ "version": "4.2.3",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
+ "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
+ "dependencies": {
+ "emoji-regex": "^8.0.0",
+ "is-fullwidth-code-point": "^3.0.0",
+ "strip-ansi": "^6.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/wrap-ansi-cjs/node_modules/strip-ansi": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
+ "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
+ "dependencies": {
+ "ansi-regex": "^5.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/wrap-ansi/node_modules/ansi-styles": {
+ "version": "6.2.1",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz",
+ "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/ansi-styles?sponsor=1"
+ }
+ },
+ "node_modules/wrappy": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
+ "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/ws": {
+ "version": "8.18.0",
+ "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.0.tgz",
+ "integrity": "sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw==",
+ "dev": true,
+ "engines": {
+ "node": ">=10.0.0"
+ },
+ "peerDependencies": {
+ "bufferutil": "^4.0.1",
+ "utf-8-validate": ">=5.0.2"
+ },
+ "peerDependenciesMeta": {
+ "bufferutil": {
+ "optional": true
+ },
+ "utf-8-validate": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/xml-name-validator": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/xml-name-validator/-/xml-name-validator-5.0.0.tgz",
+ "integrity": "sha512-EvGK8EJ3DhaHfbRlETOWAS5pO9MZITeauHKJyb8wyajUfQUenkIg2MvLDTZ4T/TgIcm3HU0TFBgWWboAZ30UHg==",
+ "dev": true,
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/xmlchars": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/xmlchars/-/xmlchars-2.2.0.tgz",
+ "integrity": "sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw==",
+ "dev": true
+ },
+ "node_modules/y18n": {
+ "version": "5.0.8",
+ "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz",
+ "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==",
+ "dev": true,
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/yallist": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz",
+ "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==",
+ "dev": true
+ },
+ "node_modules/yaml": {
+ "version": "2.6.0",
+ "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.6.0.tgz",
+ "integrity": "sha512-a6ae//JvKDEra2kdi1qzCyrJW/WZCgFi8ydDV+eXExl95t+5R+ijnqHJbz9tmMh8FUjx3iv2fCQ4dclAQlO2UQ==",
+ "bin": {
+ "yaml": "bin.mjs"
+ },
+ "engines": {
+ "node": ">= 14"
+ }
+ },
+ "node_modules/yargs": {
+ "version": "17.7.2",
+ "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz",
+ "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==",
+ "dev": true,
+ "dependencies": {
+ "cliui": "^8.0.1",
+ "escalade": "^3.1.1",
+ "get-caller-file": "^2.0.5",
+ "require-directory": "^2.1.1",
+ "string-width": "^4.2.3",
+ "y18n": "^5.0.5",
+ "yargs-parser": "^21.1.1"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/yargs-parser": {
+ "version": "21.1.1",
+ "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz",
+ "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==",
+ "dev": true,
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/yargs/node_modules/ansi-regex": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
+ "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/yargs/node_modules/emoji-regex": {
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
+ "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
+ "dev": true
+ },
+ "node_modules/yargs/node_modules/string-width": {
+ "version": "4.2.3",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
+ "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
+ "dev": true,
+ "dependencies": {
+ "emoji-regex": "^8.0.0",
+ "is-fullwidth-code-point": "^3.0.0",
+ "strip-ansi": "^6.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/yargs/node_modules/strip-ansi": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
+ "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
+ "dev": true,
+ "dependencies": {
+ "ansi-regex": "^5.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/yocto-queue": {
+ "version": "0.1.0",
+ "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz",
+ "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==",
+ "dev": true,
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/yoctocolors-cjs": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/yoctocolors-cjs/-/yoctocolors-cjs-2.1.2.tgz",
+ "integrity": "sha512-cYVsTjKl8b+FrnidjibDWskAv7UKOfcwaVZdp/it9n1s9fU3IkgDbhdIRKCW4JDsAlECJY0ytoVPT3sK6kideA==",
+ "dev": true,
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/zod": {
+ "version": "3.23.8",
+ "resolved": "https://registry.npmjs.org/zod/-/zod-3.23.8.tgz",
+ "integrity": "sha512-XBx9AXhXktjUqnepgTiE5flcKIYWi/rme0Eaj+5Y0lftuGBq+jyRu/md4WnuxqgP1ubdpNCsYEYPxrzVHD8d6g==",
+ "funding": {
+ "url": "https://github.com/sponsors/colinhacks"
+ }
+ }
+ }
+}
diff --git a/ui-v2/package.json b/ui-v2/package.json
new file mode 100644
index 000000000000..fed5a9f75eb5
--- /dev/null
+++ b/ui-v2/package.json
@@ -0,0 +1,97 @@
+{
+ "name": "prefect-ui",
+ "private": true,
+ "version": "0.0.0",
+ "type": "module",
+ "scripts": {
+ "dev": "vite",
+ "build": "tsc -b && vite build",
+ "test": "vitest",
+ "lint": "eslint .",
+ "format:check": "biome format",
+ "format": "biome format --write",
+ "preview": "vite preview",
+ "service-sync": "uv run ../scripts/generate_oss_openapi_schema.py && npx openapi-typescript oss_schema.json -o src/api/prefect.ts && rm oss_schema.json",
+ "prepare": "cd .. && husky ui-v2/.husky",
+ "storybook": "storybook dev -p 6006",
+ "build-storybook": "storybook build"
+ },
+ "dependencies": {
+ "@codemirror/lang-json": "^6.0.1",
+ "@hookform/resolvers": "^3.9.1",
+ "@radix-ui/react-avatar": "^1.1.1",
+ "@radix-ui/react-checkbox": "^1.1.2",
+ "@radix-ui/react-dialog": "^1.1.2",
+ "@radix-ui/react-dropdown-menu": "^2.1.2",
+ "@radix-ui/react-hover-card": "^1.1.2",
+ "@radix-ui/react-icons": "^1.3.0",
+ "@radix-ui/react-label": "^2.1.0",
+ "@radix-ui/react-popover": "^1.1.2",
+ "@radix-ui/react-scroll-area": "^1.2.0",
+ "@radix-ui/react-select": "^2.1.2",
+ "@radix-ui/react-separator": "^1.1.0",
+ "@radix-ui/react-slot": "^1.1.0",
+ "@radix-ui/react-tabs": "^1.1.1",
+ "@radix-ui/react-toast": "^1.2.2",
+ "@radix-ui/react-tooltip": "^1.1.3",
+ "@tanstack/react-query": "^5.56.2",
+ "@tanstack/react-table": "^8.20.5",
+ "@tanstack/router-zod-adapter": "^1.58.16",
+ "@uiw/react-codemirror": "^4.23.6",
+ "class-variance-authority": "^0.7.0",
+ "clsx": "^2.1.1",
+ "date-fns": "^3.6.0",
+ "date-fns-tz": "^3.2.0",
+ "lucide-react": "^0.447.0",
+ "openapi-fetch": "^0.12.2",
+ "react": "^18.3.1",
+ "react-day-picker": "^8.10.1",
+ "react-dom": "^18.3.1",
+ "react-hook-form": "^7.53.1",
+ "recharts": "^2.12.7",
+ "tailwind-merge": "^2.5.2",
+ "tailwindcss-animate": "^1.0.7",
+ "zod": "^3.23.8"
+ },
+ "devDependencies": {
+ "@biomejs/biome": "1.9.4",
+ "@eslint/js": "^9.12.0",
+ "@storybook/addon-essentials": "^8.4.2",
+ "@storybook/addon-interactions": "^8.4.2",
+ "@storybook/blocks": "^8.4.2",
+ "@storybook/react": "^8.4.2",
+ "@storybook/react-vite": "^8.4.2",
+ "@storybook/test": "^8.4.2",
+ "@tanstack/eslint-plugin-router": "^1.77.7",
+ "@tanstack/router-devtools": "^1.58.15",
+ "@tanstack/router-plugin": "^1.58.12",
+ "@testing-library/dom": "^10.4.0",
+ "@testing-library/jest-dom": "^6.6.3",
+ "@testing-library/react": "^16.0.1",
+ "@testing-library/user-event": "^14.5.2",
+ "@types/node": "^22.7.4",
+ "@types/react": "^18.3.12",
+ "@types/react-dom": "^18.3.1",
+ "@vitejs/plugin-react-swc": "^3.5.0",
+ "autoprefixer": "^10.4.20",
+ "eslint": "^9.12.0",
+ "eslint-plugin-jest-dom": "^5.4.0",
+ "eslint-plugin-react": "^7.37.1",
+ "eslint-plugin-react-hooks": "^5.1.0-rc.0",
+ "eslint-plugin-react-refresh": "^0.4.9",
+ "eslint-plugin-storybook": "^0.11.0",
+ "eslint-plugin-testing-library": "^6.4.0",
+ "eslint-plugin-unused-imports": "^4.1.4",
+ "globals": "^15.10.0",
+ "husky": "^9.1.6",
+ "jsdom": "^25.0.1",
+ "msw": "^2.6.0",
+ "postcss": "^8.4.47",
+ "storybook": "^8.4.2",
+ "tailwindcss": "^3.4.13",
+ "typescript": "^5.5.3",
+ "typescript-eslint": "^8.8.1",
+ "vite": "^5.4.1",
+ "vitest": "^2.1.4"
+ }
+}
diff --git a/ui-v2/postcss.config.js b/ui-v2/postcss.config.js
new file mode 100644
index 000000000000..7b75c83aff1c
--- /dev/null
+++ b/ui-v2/postcss.config.js
@@ -0,0 +1,6 @@
+export default {
+ plugins: {
+ tailwindcss: {},
+ autoprefixer: {},
+ },
+};
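The PostCSS config above wires Tailwind and Autoprefixer into Vite's CSS pipeline. For orientation, a minimal companion Tailwind config — a sketch only; the PR's actual `ui-v2` Tailwind config may differ — looks like this (Tailwind v3, per the `tailwindcss@^3.4` devDependency):

```ts
// tailwind.config.ts — illustrative sketch, not the PR's actual config.
import type { Config } from "tailwindcss";

export default {
  // Files Tailwind scans for class names; an assumed layout for a Vite app.
  content: ["./index.html", "./src/**/*.{ts,tsx}"],
  theme: {
    extend: {},
  },
  plugins: [],
} satisfies Config;
```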
diff --git a/ui-v2/public/ico/android-chrome-192x192.png b/ui-v2/public/ico/android-chrome-192x192.png
new file mode 100644
index 000000000000..f307ca127650
Binary files /dev/null and b/ui-v2/public/ico/android-chrome-192x192.png differ
diff --git a/ui-v2/public/ico/android-chrome-512x512.png b/ui-v2/public/ico/android-chrome-512x512.png
new file mode 100644
index 000000000000..bfeb13ea3fb0
Binary files /dev/null and b/ui-v2/public/ico/android-chrome-512x512.png differ
diff --git a/ui-v2/public/ico/apple-touch-icon.png b/ui-v2/public/ico/apple-touch-icon.png
new file mode 100644
index 000000000000..a0eb8e95b771
Binary files /dev/null and b/ui-v2/public/ico/apple-touch-icon.png differ
diff --git a/ui-v2/public/ico/browserconfig.xml b/ui-v2/public/ico/browserconfig.xml
new file mode 100644
index 000000000000..b3930d0f0471
--- /dev/null
+++ b/ui-v2/public/ico/browserconfig.xml
@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="utf-8"?>
+<browserconfig>
+    <msapplication>
+        <tile>
+            <square150x150logo src="/ico/mstile-150x150.png"/>
+            <TileColor>#da532c</TileColor>
+        </tile>
+    </msapplication>
+</browserconfig>
diff --git a/ui-v2/public/ico/favicon-16x16-dark.png b/ui-v2/public/ico/favicon-16x16-dark.png
new file mode 100644
index 000000000000..6af87c17996f
Binary files /dev/null and b/ui-v2/public/ico/favicon-16x16-dark.png differ
diff --git a/ui-v2/public/ico/favicon-16x16.png b/ui-v2/public/ico/favicon-16x16.png
new file mode 100644
index 000000000000..737a0f1b249e
Binary files /dev/null and b/ui-v2/public/ico/favicon-16x16.png differ
diff --git a/ui-v2/public/ico/favicon-32x32-dark.png b/ui-v2/public/ico/favicon-32x32-dark.png
new file mode 100644
index 000000000000..859cb6c4c339
Binary files /dev/null and b/ui-v2/public/ico/favicon-32x32-dark.png differ
diff --git a/ui-v2/public/ico/favicon-32x32.png b/ui-v2/public/ico/favicon-32x32.png
new file mode 100644
index 000000000000..945c47af30c5
Binary files /dev/null and b/ui-v2/public/ico/favicon-32x32.png differ
diff --git a/ui-v2/public/ico/favicon-dark.ico b/ui-v2/public/ico/favicon-dark.ico
new file mode 100644
index 000000000000..665f48ad1189
Binary files /dev/null and b/ui-v2/public/ico/favicon-dark.ico differ
diff --git a/ui-v2/public/ico/favicon.ico b/ui-v2/public/ico/favicon.ico
new file mode 100644
index 000000000000..832e9fd18e32
Binary files /dev/null and b/ui-v2/public/ico/favicon.ico differ
diff --git a/ui-v2/public/ico/mstile-150x150.png b/ui-v2/public/ico/mstile-150x150.png
new file mode 100644
index 000000000000..5c87e11377cd
Binary files /dev/null and b/ui-v2/public/ico/mstile-150x150.png differ
diff --git a/ui-v2/public/ico/safari-pinned-tab.svg b/ui-v2/public/ico/safari-pinned-tab.svg
new file mode 100644
index 000000000000..b0059fae09bd
--- /dev/null
+++ b/ui-v2/public/ico/safari-pinned-tab.svg
@@ -0,0 +1,28 @@
+<!-- SVG markup lost in extraction; the file is a monochrome pinned-tab icon
+     traced by potrace 1.14, written by Peter Selinger 2001-2017. -->
diff --git a/ui-v2/public/ico/site.webmanifest b/ui-v2/public/ico/site.webmanifest
new file mode 100644
index 000000000000..f2443d9c0ae5
--- /dev/null
+++ b/ui-v2/public/ico/site.webmanifest
@@ -0,0 +1,19 @@
+{
+ "name": "",
+ "short_name": "",
+ "icons": [
+ {
+ "src": "/ico/android-chrome-192x192.png",
+ "sizes": "192x192",
+ "type": "image/png"
+ },
+ {
+ "src": "/ico/android-chrome-512x512.png",
+ "sizes": "512x512",
+ "type": "image/png"
+ }
+ ],
+ "theme_color": "#ffffff",
+ "background_color": "#ffffff",
+ "display": "standalone"
+}
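The auto-generated `src/api/prefect.ts` below (produced by the `service-sync` script via openapi-typescript) exports a `paths` interface describing every server route. The `openapi-fetch` dependency declared in `ui-v2/package.json` consumes that interface so each request and response is fully typed. A minimal sketch of the pairing — the base URL and flow id are placeholder assumptions, and the PR's actual client wiring may differ:

```ts
// client-demo.ts — illustrative sketch, not the PR's actual client code.
import createClient from "openapi-fetch";
import type { paths } from "./src/api/prefect";

// Assumed default local Prefect API address; adjust per deployment.
const client = createClient<paths>({ baseUrl: "http://127.0.0.1:4200/api" });

async function demo() {
  // GET /health — response typing flows from the generated `paths`.
  const { data, error } = await client.GET("/health");
  if (error) throw new Error("health check failed");
  console.log(data);

  // Path parameters are typed too: /flows/{id} requires params.path.id.
  const flow = await client.GET("/flows/{id}", {
    params: { path: { id: "00000000-0000-0000-0000-000000000000" } },
  });
  console.log(flow.data ?? flow.error);
}

void demo();
```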
diff --git a/ui-v2/src/api/prefect.ts b/ui-v2/src/api/prefect.ts
new file mode 100644
index 000000000000..f49fc08bb2ba
--- /dev/null
+++ b/ui-v2/src/api/prefect.ts
@@ -0,0 +1,15901 @@
+/**
+ * This file was auto-generated by openapi-typescript.
+ * Do not make direct changes to the file.
+ */
+
+export interface paths {
+ "/health": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /** Health Check */
+ get: operations["health_check_health_get"];
+ put?: never;
+ post?: never;
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/version": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /** Server Version */
+ get: operations["server_version_version_get"];
+ put?: never;
+ post?: never;
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/flows/": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Create Flow
+ * @description Gracefully creates a new flow from the provided schema. If a flow with the
+ * same name already exists, the existing flow is returned.
+ */
+ post: operations["create_flow_flows__post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/flows/{id}": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /**
+ * Read Flow
+ * @description Get a flow by id.
+ */
+ get: operations["read_flow_flows__id__get"];
+ put?: never;
+ post?: never;
+ /**
+ * Delete Flow
+ * @description Delete a flow by id.
+ */
+ delete: operations["delete_flow_flows__id__delete"];
+ options?: never;
+ head?: never;
+ /**
+ * Update Flow
+ * @description Updates a flow.
+ */
+ patch: operations["update_flow_flows__id__patch"];
+ trace?: never;
+ };
+ "/flows/count": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Count Flows
+ * @description Count flows.
+ */
+ post: operations["count_flows_flows_count_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/flows/name/{name}": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /**
+ * Read Flow By Name
+ * @description Get a flow by name.
+ */
+ get: operations["read_flow_by_name_flows_name__name__get"];
+ put?: never;
+ post?: never;
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/flows/filter": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Read Flows
+ * @description Query for flows.
+ */
+ post: operations["read_flows_flows_filter_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/flows/paginate": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Paginate Flows
+ * @description Pagination query for flows.
+ */
+ post: operations["paginate_flows_flows_paginate_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/flow_runs/": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Create Flow Run
+ * @description Create a flow run. If a flow run with the same flow_id and
+ * idempotency key already exists, the existing flow run will be returned.
+ *
+ * If no state is provided, the flow run will be created in a PENDING state.
+ */
+ post: operations["create_flow_run_flow_runs__post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/flow_runs/{id}": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /**
+ * Read Flow Run
+ * @description Get a flow run by id.
+ */
+ get: operations["read_flow_run_flow_runs__id__get"];
+ put?: never;
+ post?: never;
+ /**
+ * Delete Flow Run
+ * @description Delete a flow run by id.
+ */
+ delete: operations["delete_flow_run_flow_runs__id__delete"];
+ options?: never;
+ head?: never;
+ /**
+ * Update Flow Run
+ * @description Updates a flow run.
+ */
+ patch: operations["update_flow_run_flow_runs__id__patch"];
+ trace?: never;
+ };
+ "/flow_runs/count": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Count Flow Runs
+ * @description Query for flow runs.
+ */
+ post: operations["count_flow_runs_flow_runs_count_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/flow_runs/lateness": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Average Flow Run Lateness
+ * @description Query for average flow-run lateness in seconds.
+ */
+ post: operations["average_flow_run_lateness_flow_runs_lateness_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/flow_runs/history": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Flow Run History
+ * @description Query for flow run history data across a given range and interval.
+ */
+ post: operations["flow_run_history_flow_runs_history_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/flow_runs/{id}/graph": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /**
+ * Read Flow Run Graph V1
+ * @description Get a task run dependency map for a given flow run.
+ */
+ get: operations["read_flow_run_graph_v1_flow_runs__id__graph_get"];
+ put?: never;
+ post?: never;
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/flow_runs/{id}/graph-v2": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /**
+ * Read Flow Run Graph V2
+ * @description Get a graph of the tasks and subflow runs for the given flow run
+ */
+ get: operations["read_flow_run_graph_v2_flow_runs__id__graph_v2_get"];
+ put?: never;
+ post?: never;
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/flow_runs/{id}/resume": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Resume Flow Run
+ * @description Resume a paused flow run.
+ */
+ post: operations["resume_flow_run_flow_runs__id__resume_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/flow_runs/filter": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Read Flow Runs
+ * @description Query for flow runs.
+ */
+ post: operations["read_flow_runs_flow_runs_filter_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/flow_runs/{id}/set_state": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Set Flow Run State
+ * @description Set a flow run state, invoking any orchestration rules.
+ */
+ post: operations["set_flow_run_state_flow_runs__id__set_state_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/flow_runs/{id}/input": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Create Flow Run Input
+ * @description Create a key/value input for a flow run.
+ */
+ post: operations["create_flow_run_input_flow_runs__id__input_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/flow_runs/{id}/input/filter": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Filter Flow Run Input
+ * @description Filter flow run inputs by key prefix
+ */
+ post: operations["filter_flow_run_input_flow_runs__id__input_filter_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/flow_runs/{id}/input/{key}": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /**
+ * Read Flow Run Input
+   * @description Read a value from a flow run input
+ */
+ get: operations["read_flow_run_input_flow_runs__id__input__key__get"];
+ put?: never;
+ post?: never;
+ /**
+ * Delete Flow Run Input
+ * @description Delete a flow run input
+ */
+ delete: operations["delete_flow_run_input_flow_runs__id__input__key__delete"];
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/flow_runs/paginate": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Paginate Flow Runs
+ * @description Pagination query for flow runs.
+ */
+ post: operations["paginate_flow_runs_flow_runs_paginate_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/flow_runs/{id}/logs/download": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /**
+ * Download Logs
+ * @description Download all flow run logs as a CSV file, collecting all logs until there are no more logs to retrieve.
+ */
+ get: operations["download_logs_flow_runs__id__logs_download_get"];
+ put?: never;
+ post?: never;
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/task_runs/": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Create Task Run
+ * @description Create a task run. If a task run with the same flow_run_id,
+ * task_key, and dynamic_key already exists, the existing task
+ * run will be returned.
+ *
+ * If no state is provided, the task run will be created in a PENDING state.
+ */
+ post: operations["create_task_run_task_runs__post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/task_runs/{id}": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /**
+ * Read Task Run
+ * @description Get a task run by id.
+ */
+ get: operations["read_task_run_task_runs__id__get"];
+ put?: never;
+ post?: never;
+ /**
+ * Delete Task Run
+ * @description Delete a task run by id.
+ */
+ delete: operations["delete_task_run_task_runs__id__delete"];
+ options?: never;
+ head?: never;
+ /**
+ * Update Task Run
+ * @description Updates a task run.
+ */
+ patch: operations["update_task_run_task_runs__id__patch"];
+ trace?: never;
+ };
+ "/task_runs/count": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Count Task Runs
+ * @description Count task runs.
+ */
+ post: operations["count_task_runs_task_runs_count_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/task_runs/history": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Task Run History
+ * @description Query for task run history data across a given range and interval.
+ */
+ post: operations["task_run_history_task_runs_history_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/task_runs/filter": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Read Task Runs
+ * @description Query for task runs.
+ */
+ post: operations["read_task_runs_task_runs_filter_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/task_runs/{id}/set_state": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Set Task Run State
+ * @description Set a task run state, invoking any orchestration rules.
+ */
+ post: operations["set_task_run_state_task_runs__id__set_state_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/flow_run_states/{id}": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /**
+ * Read Flow Run State
+ * @description Get a flow run state by id.
+ */
+ get: operations["read_flow_run_state_flow_run_states__id__get"];
+ put?: never;
+ post?: never;
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/flow_run_states/": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /**
+ * Read Flow Run States
+ * @description Get states associated with a flow run.
+ */
+ get: operations["read_flow_run_states_flow_run_states__get"];
+ put?: never;
+ post?: never;
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/task_run_states/{id}": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /**
+ * Read Task Run State
+ * @description Get a task run state by id.
+ */
+ get: operations["read_task_run_state_task_run_states__id__get"];
+ put?: never;
+ post?: never;
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/task_run_states/": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /**
+ * Read Task Run States
+ * @description Get states associated with a task run.
+ */
+ get: operations["read_task_run_states_task_run_states__get"];
+ put?: never;
+ post?: never;
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/flow_run_notification_policies/": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Create Flow Run Notification Policy
+ * @description Creates a new flow run notification policy.
+ */
+ post: operations["create_flow_run_notification_policy_flow_run_notification_policies__post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/flow_run_notification_policies/{id}": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /**
+ * Read Flow Run Notification Policy
+ * @description Get a flow run notification policy by id.
+ */
+ get: operations["read_flow_run_notification_policy_flow_run_notification_policies__id__get"];
+ put?: never;
+ post?: never;
+ /**
+ * Delete Flow Run Notification Policy
+ * @description Delete a flow run notification policy by id.
+ */
+ delete: operations["delete_flow_run_notification_policy_flow_run_notification_policies__id__delete"];
+ options?: never;
+ head?: never;
+ /**
+ * Update Flow Run Notification Policy
+ * @description Updates an existing flow run notification policy.
+ */
+ patch: operations["update_flow_run_notification_policy_flow_run_notification_policies__id__patch"];
+ trace?: never;
+ };
+ "/flow_run_notification_policies/filter": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Read Flow Run Notification Policies
+ * @description Query for flow run notification policies.
+ */
+ post: operations["read_flow_run_notification_policies_flow_run_notification_policies_filter_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/deployments/": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Create Deployment
+ * @description Gracefully creates a new deployment from the provided schema. If a deployment with
+ * the same name and flow_id already exists, the deployment is updated.
+ *
+ * If the deployment has an active schedule, flow runs will be scheduled.
+ * When upserting, any scheduled runs from the existing deployment will be deleted.
+ */
+ post: operations["create_deployment_deployments__post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/deployments/{id}": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /**
+ * Read Deployment
+ * @description Get a deployment by id.
+ */
+ get: operations["read_deployment_deployments__id__get"];
+ put?: never;
+ post?: never;
+ /**
+ * Delete Deployment
+ * @description Delete a deployment by id.
+ */
+ delete: operations["delete_deployment_deployments__id__delete"];
+ options?: never;
+ head?: never;
+ /** Update Deployment */
+ patch: operations["update_deployment_deployments__id__patch"];
+ trace?: never;
+ };
+ "/deployments/name/{flow_name}/{deployment_name}": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /**
+ * Read Deployment By Name
+ * @description Get a deployment using the name of the flow and the deployment.
+ */
+ get: operations["read_deployment_by_name_deployments_name__flow_name___deployment_name__get"];
+ put?: never;
+ post?: never;
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/deployments/filter": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Read Deployments
+ * @description Query for deployments.
+ */
+ post: operations["read_deployments_deployments_filter_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/deployments/paginate": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Paginate Deployments
+ * @description Pagination query for deployments.
+ */
+ post: operations["paginate_deployments_deployments_paginate_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/deployments/get_scheduled_flow_runs": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Get Scheduled Flow Runs For Deployments
+ * @description Get scheduled runs for a set of deployments. Used by a runner to poll for work.
+ */
+ post: operations["get_scheduled_flow_runs_for_deployments_deployments_get_scheduled_flow_runs_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/deployments/count": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Count Deployments
+ * @description Count deployments.
+ */
+ post: operations["count_deployments_deployments_count_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/deployments/{id}/schedule": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Schedule Deployment
+ * @description Schedule runs for a deployment. For backfills, provide start/end times in the past.
+ *
+ * This function will generate the minimum number of runs that satisfy the min
+ * and max times, and the min and max counts. Specifically, the following order
+ * will be respected.
+ *
+ * - Runs will be generated starting on or after the `start_time`
+ * - No more than `max_runs` runs will be generated
+ * - No runs will be generated after `end_time` is reached
+ * - At least `min_runs` runs will be generated
+ * - Runs will be generated until at least `start_time + min_time` is reached
+ */
+ post: operations["schedule_deployment_deployments__id__schedule_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/deployments/{id}/resume_deployment": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Resume Deployment
+ * @description Set a deployment schedule to active. Runs will be scheduled immediately.
+ */
+ post: operations["resume_deployment_deployments__id__resume_deployment_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/deployments/{id}/pause_deployment": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Pause Deployment
+ * @description Set a deployment schedule to inactive. Any auto-scheduled runs still in a Scheduled
+ * state will be deleted.
+ */
+ post: operations["pause_deployment_deployments__id__pause_deployment_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/deployments/{id}/create_flow_run": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Create Flow Run From Deployment
+ * @description Create a flow run from a deployment.
+ *
+ * Any parameters not provided will be inferred from the deployment's parameters.
+ * If tags are not provided, the deployment's tags will be used.
+ *
+ * If no state is provided, the flow run will be created in a SCHEDULED state.
+ */
+ post: operations["create_flow_run_from_deployment_deployments__id__create_flow_run_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/deployments/{id}/work_queue_check": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /**
+ * Work Queue Check For Deployment
+ * @deprecated
+ * @description Get list of work-queues that are able to pick up the specified deployment.
+ *
+ * This endpoint is intended to be used by the UI to provide users warnings
+ * about deployments that are unable to be executed because there are no work
+ * queues that will pick up their runs, based on existing filter criteria. It
+ * may be deprecated in the future because there is not a strict relationship
+ * between work queues and deployments.
+ */
+ get: operations["work_queue_check_for_deployment_deployments__id__work_queue_check_get"];
+ put?: never;
+ post?: never;
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/deployments/{id}/schedules": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /** Read Deployment Schedules */
+ get: operations["read_deployment_schedules_deployments__id__schedules_get"];
+ put?: never;
+ /** Create Deployment Schedules */
+ post: operations["create_deployment_schedules_deployments__id__schedules_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/deployments/{id}/schedules/{schedule_id}": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ post?: never;
+ /** Delete Deployment Schedule */
+ delete: operations["delete_deployment_schedule_deployments__id__schedules__schedule_id__delete"];
+ options?: never;
+ head?: never;
+ /** Update Deployment Schedule */
+ patch: operations["update_deployment_schedule_deployments__id__schedules__schedule_id__patch"];
+ trace?: never;
+ };
+ "/saved_searches/": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ /**
+ * Create Saved Search
+ * @description Gracefully creates a new saved search from the provided schema.
+ *
+ * If a saved search with the same name already exists, the saved search's fields are
+ * replaced.
+ */
+ put: operations["create_saved_search_saved_searches__put"];
+ post?: never;
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/saved_searches/{id}": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /**
+ * Read Saved Search
+ * @description Get a saved search by id.
+ */
+ get: operations["read_saved_search_saved_searches__id__get"];
+ put?: never;
+ post?: never;
+ /**
+ * Delete Saved Search
+ * @description Delete a saved search by id.
+ */
+ delete: operations["delete_saved_search_saved_searches__id__delete"];
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/saved_searches/filter": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Read Saved Searches
+ * @description Query for saved searches.
+ */
+ post: operations["read_saved_searches_saved_searches_filter_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/logs/": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Create Logs
+ * @description Create new logs from the provided schema.
+ */
+ post: operations["create_logs_logs__post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/logs/filter": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Read Logs
+ * @description Query for logs.
+ */
+ post: operations["read_logs_logs_filter_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/concurrency_limits/": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /** Create Concurrency Limit */
+ post: operations["create_concurrency_limit_concurrency_limits__post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/concurrency_limits/{id}": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /**
+ * Read Concurrency Limit
+ * @description Get a concurrency limit by id.
+ *
+ * The `active slots` field contains a list of TaskRun IDs currently using a
+ * concurrency slot for the specified tag.
+ */
+ get: operations["read_concurrency_limit_concurrency_limits__id__get"];
+ put?: never;
+ post?: never;
+ /** Delete Concurrency Limit */
+ delete: operations["delete_concurrency_limit_concurrency_limits__id__delete"];
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/concurrency_limits/tag/{tag}": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /**
+ * Read Concurrency Limit By Tag
+ * @description Get a concurrency limit by tag.
+ *
+ * The `active slots` field contains a list of TaskRun IDs currently using a
+ * concurrency slot for the specified tag.
+ */
+ get: operations["read_concurrency_limit_by_tag_concurrency_limits_tag__tag__get"];
+ put?: never;
+ post?: never;
+ /** Delete Concurrency Limit By Tag */
+ delete: operations["delete_concurrency_limit_by_tag_concurrency_limits_tag__tag__delete"];
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/concurrency_limits/filter": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Read Concurrency Limits
+ * @description Query for concurrency limits.
+ *
+ * For each concurrency limit the `active slots` field contains a list of TaskRun IDs
+ * currently using a concurrency slot for the specified tag.
+ */
+ post: operations["read_concurrency_limits_concurrency_limits_filter_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/concurrency_limits/tag/{tag}/reset": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /** Reset Concurrency Limit By Tag */
+ post: operations["reset_concurrency_limit_by_tag_concurrency_limits_tag__tag__reset_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/concurrency_limits/increment": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /** Increment Concurrency Limits V1 */
+ post: operations["increment_concurrency_limits_v1_concurrency_limits_increment_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/concurrency_limits/decrement": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /** Decrement Concurrency Limits V1 */
+ post: operations["decrement_concurrency_limits_v1_concurrency_limits_decrement_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/v2/concurrency_limits/": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /** Create Concurrency Limit V2 */
+ post: operations["create_concurrency_limit_v2_v2_concurrency_limits__post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/v2/concurrency_limits/{id_or_name}": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /** Read Concurrency Limit V2 */
+ get: operations["read_concurrency_limit_v2_v2_concurrency_limits__id_or_name__get"];
+ put?: never;
+ post?: never;
+ /** Delete Concurrency Limit V2 */
+ delete: operations["delete_concurrency_limit_v2_v2_concurrency_limits__id_or_name__delete"];
+ options?: never;
+ head?: never;
+ /** Update Concurrency Limit V2 */
+ patch: operations["update_concurrency_limit_v2_v2_concurrency_limits__id_or_name__patch"];
+ trace?: never;
+ };
+ "/v2/concurrency_limits/filter": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /** Read All Concurrency Limits V2 */
+ post: operations["read_all_concurrency_limits_v2_v2_concurrency_limits_filter_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/v2/concurrency_limits/increment": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /** Bulk Increment Active Slots */
+ post: operations["bulk_increment_active_slots_v2_concurrency_limits_increment_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/v2/concurrency_limits/decrement": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /** Bulk Decrement Active Slots */
+ post: operations["bulk_decrement_active_slots_v2_concurrency_limits_decrement_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/block_types/": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Create Block Type
+ * @description Create a new block type
+ */
+ post: operations["create_block_type_block_types__post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/block_types/{id}": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /**
+ * Read Block Type By Id
+ * @description Get a block type by ID.
+ */
+ get: operations["read_block_type_by_id_block_types__id__get"];
+ put?: never;
+ post?: never;
+ /** Delete Block Type */
+ delete: operations["delete_block_type_block_types__id__delete"];
+ options?: never;
+ head?: never;
+ /**
+ * Update Block Type
+ * @description Update a block type.
+ */
+ patch: operations["update_block_type_block_types__id__patch"];
+ trace?: never;
+ };
+ "/block_types/slug/{slug}": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /**
+ * Read Block Type By Slug
+ * @description Get a block type by slug.
+ */
+ get: operations["read_block_type_by_slug_block_types_slug__slug__get"];
+ put?: never;
+ post?: never;
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/block_types/filter": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Read Block Types
+ * @description Gets all block types. Optionally limit return with limit and offset.
+ */
+ post: operations["read_block_types_block_types_filter_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/block_types/slug/{slug}/block_documents": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /** Read Block Documents For Block Type */
+ get: operations["read_block_documents_for_block_type_block_types_slug__slug__block_documents_get"];
+ put?: never;
+ post?: never;
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/block_types/slug/{slug}/block_documents/name/{block_document_name}": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /** Read Block Document By Name For Block Type */
+ get: operations["read_block_document_by_name_for_block_type_block_types_slug__slug__block_documents_name__block_document_name__get"];
+ put?: never;
+ post?: never;
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/block_types/install_system_block_types": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /** Install System Block Types */
+ post: operations["install_system_block_types_block_types_install_system_block_types_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/block_documents/": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Create Block Document
+ * @description Create a new block document.
+ */
+ post: operations["create_block_document_block_documents__post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/block_documents/filter": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Read Block Documents
+ * @description Query for block documents.
+ */
+ post: operations["read_block_documents_block_documents_filter_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/block_documents/count": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Count Block Documents
+ * @description Count block documents.
+ */
+ post: operations["count_block_documents_block_documents_count_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/block_documents/{id}": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /** Read Block Document By Id */
+ get: operations["read_block_document_by_id_block_documents__id__get"];
+ put?: never;
+ post?: never;
+ /** Delete Block Document */
+ delete: operations["delete_block_document_block_documents__id__delete"];
+ options?: never;
+ head?: never;
+ /** Update Block Document Data */
+ patch: operations["update_block_document_data_block_documents__id__patch"];
+ trace?: never;
+ };
+ "/work_pools/": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Create Work Pool
+ * @description Creates a new work pool. If a work pool with the same
+ * name already exists, an error will be raised.
+ */
+ post: operations["create_work_pool_work_pools__post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/work_pools/{name}": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /**
+ * Read Work Pool
+ * @description Read a work pool by name
+ */
+ get: operations["read_work_pool_work_pools__name__get"];
+ put?: never;
+ post?: never;
+ /**
+ * Delete Work Pool
+ * @description Delete a work pool
+ */
+ delete: operations["delete_work_pool_work_pools__name__delete"];
+ options?: never;
+ head?: never;
+ /**
+ * Update Work Pool
+ * @description Update a work pool
+ */
+ patch: operations["update_work_pool_work_pools__name__patch"];
+ trace?: never;
+ };
+ "/work_pools/filter": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Read Work Pools
+ * @description Read multiple work pools
+ */
+ post: operations["read_work_pools_work_pools_filter_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/work_pools/count": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Count Work Pools
+ * @description Count work pools
+ */
+ post: operations["count_work_pools_work_pools_count_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/work_pools/{name}/get_scheduled_flow_runs": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Get Scheduled Flow Runs
+ * @description Load scheduled runs for a worker
+ */
+ post: operations["get_scheduled_flow_runs_work_pools__name__get_scheduled_flow_runs_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/work_pools/{work_pool_name}/queues": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Create Work Queue
+ * @description Creates a new work pool queue. If a work pool queue with the same
+ * name already exists, an error will be raised.
+ */
+ post: operations["create_work_queue_work_pools__work_pool_name__queues_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/work_pools/{work_pool_name}/queues/{name}": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /**
+ * Read Work Queue
+ * @description Read a work pool queue
+ */
+ get: operations["read_work_queue_work_pools__work_pool_name__queues__name__get"];
+ put?: never;
+ post?: never;
+ /**
+ * Delete Work Queue
+ * @description Delete a work pool queue
+ */
+ delete: operations["delete_work_queue_work_pools__work_pool_name__queues__name__delete"];
+ options?: never;
+ head?: never;
+ /**
+ * Update Work Queue
+ * @description Update a work pool queue
+ */
+ patch: operations["update_work_queue_work_pools__work_pool_name__queues__name__patch"];
+ trace?: never;
+ };
+ "/work_pools/{work_pool_name}/queues/filter": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Read Work Queues
+ * @description Read all work pool queues
+ */
+ post: operations["read_work_queues_work_pools__work_pool_name__queues_filter_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/work_pools/{work_pool_name}/workers/heartbeat": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /** Worker Heartbeat */
+ post: operations["worker_heartbeat_work_pools__work_pool_name__workers_heartbeat_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/work_pools/{work_pool_name}/workers/filter": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Read Workers
+ * @description Read all worker processes
+ */
+ post: operations["read_workers_work_pools__work_pool_name__workers_filter_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/work_pools/{work_pool_name}/workers/{name}": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ post?: never;
+ /**
+ * Delete Worker
+ * @description Delete a work pool's worker
+ */
+ delete: operations["delete_worker_work_pools__work_pool_name__workers__name__delete"];
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/task_workers/filter": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Read Task Workers
+ * @description Read active task workers. Optionally filter by task keys.
+ */
+ post: operations["read_task_workers_task_workers_filter_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/work_queues/": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Create Work Queue
+ * @description Creates a new work queue.
+ *
+ * If a work queue with the same name already exists, an error
+ * will be raised.
+ */
+ post: operations["create_work_queue_work_queues__post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/work_queues/{id}": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /**
+ * Read Work Queue
+ * @description Get a work queue by id.
+ */
+ get: operations["read_work_queue_work_queues__id__get"];
+ put?: never;
+ post?: never;
+ /**
+ * Delete Work Queue
+ * @description Delete a work queue by id.
+ */
+ delete: operations["delete_work_queue_work_queues__id__delete"];
+ options?: never;
+ head?: never;
+ /**
+ * Update Work Queue
+ * @description Updates an existing work queue.
+ */
+ patch: operations["update_work_queue_work_queues__id__patch"];
+ trace?: never;
+ };
+ "/work_queues/name/{name}": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /**
+ * Read Work Queue By Name
+ * @description Get a work queue by name.
+ */
+ get: operations["read_work_queue_by_name_work_queues_name__name__get"];
+ put?: never;
+ post?: never;
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/work_queues/{id}/get_runs": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Read Work Queue Runs
+ * @description Get flow runs from the work queue.
+ */
+ post: operations["read_work_queue_runs_work_queues__id__get_runs_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/work_queues/filter": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Read Work Queues
+ * @description Query for work queues.
+ */
+ post: operations["read_work_queues_work_queues_filter_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/work_queues/{id}/status": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /**
+ * Read Work Queue Status
+ * @description Get the status of a work queue.
+ */
+ get: operations["read_work_queue_status_work_queues__id__status_get"];
+ put?: never;
+ post?: never;
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/artifacts/": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /** Create Artifact */
+ post: operations["create_artifact_artifacts__post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/artifacts/{id}": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /**
+ * Read Artifact
+ * @description Retrieve an artifact from the database.
+ */
+ get: operations["read_artifact_artifacts__id__get"];
+ put?: never;
+ post?: never;
+ /**
+ * Delete Artifact
+ * @description Delete an artifact from the database.
+ */
+ delete: operations["delete_artifact_artifacts__id__delete"];
+ options?: never;
+ head?: never;
+ /**
+ * Update Artifact
+ * @description Update an artifact in the database.
+ */
+ patch: operations["update_artifact_artifacts__id__patch"];
+ trace?: never;
+ };
+ "/artifacts/{key}/latest": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /**
+ * Read Latest Artifact
+ * @description Retrieve the latest artifact from the artifact table.
+ */
+ get: operations["read_latest_artifact_artifacts__key__latest_get"];
+ put?: never;
+ post?: never;
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/artifacts/filter": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Read Artifacts
+ * @description Retrieve artifacts from the database.
+ */
+ post: operations["read_artifacts_artifacts_filter_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/artifacts/latest/filter": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Read Latest Artifacts
+ * @description Retrieve artifacts from the database.
+ */
+ post: operations["read_latest_artifacts_artifacts_latest_filter_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/artifacts/count": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Count Artifacts
+ * @description Count artifacts from the database.
+ */
+ post: operations["count_artifacts_artifacts_count_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/artifacts/latest/count": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Count Latest Artifacts
+ * @description Count artifacts from the database.
+ */
+ post: operations["count_latest_artifacts_artifacts_latest_count_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/block_schemas/": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /** Create Block Schema */
+ post: operations["create_block_schema_block_schemas__post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/block_schemas/{id}": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /**
+ * Read Block Schema By Id
+ * @description Get a block schema by id.
+ */
+ get: operations["read_block_schema_by_id_block_schemas__id__get"];
+ put?: never;
+ post?: never;
+ /**
+ * Delete Block Schema
+ * @description Delete a block schema by id.
+ */
+ delete: operations["delete_block_schema_block_schemas__id__delete"];
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/block_schemas/filter": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Read Block Schemas
+ * @description Read all block schemas, optionally filtered by type
+ */
+ post: operations["read_block_schemas_block_schemas_filter_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/block_schemas/checksum/{checksum}": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /** Read Block Schema By Checksum */
+ get: operations["read_block_schema_by_checksum_block_schemas_checksum__checksum__get"];
+ put?: never;
+ post?: never;
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/block_capabilities/": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /** Read Available Block Capabilities */
+ get: operations["read_available_block_capabilities_block_capabilities__get"];
+ put?: never;
+ post?: never;
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/collections/views/{view}": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /**
+ * Read View Content
+ * @description Reads the content of a view from the prefect-collection-registry.
+ */
+ get: operations["read_view_content_collections_views__view__get"];
+ put?: never;
+ post?: never;
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/variables/": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /** Create Variable */
+ post: operations["create_variable_variables__post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/variables/{id}": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /** Read Variable */
+ get: operations["read_variable_variables__id__get"];
+ put?: never;
+ post?: never;
+ /** Delete Variable */
+ delete: operations["delete_variable_variables__id__delete"];
+ options?: never;
+ head?: never;
+ /** Update Variable */
+ patch: operations["update_variable_variables__id__patch"];
+ trace?: never;
+ };
+ "/variables/name/{name}": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /** Read Variable By Name */
+ get: operations["read_variable_by_name_variables_name__name__get"];
+ put?: never;
+ post?: never;
+ /** Delete Variable By Name */
+ delete: operations["delete_variable_by_name_variables_name__name__delete"];
+ options?: never;
+ head?: never;
+ /** Update Variable By Name */
+ patch: operations["update_variable_by_name_variables_name__name__patch"];
+ trace?: never;
+ };
+ "/variables/filter": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /** Read Variables */
+ post: operations["read_variables_variables_filter_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/variables/count": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /** Count Variables */
+ post: operations["count_variables_variables_count_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/csrf-token": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /**
+ * Create Csrf Token
+ * @description Create or update a CSRF token for a client
+ */
+ get: operations["create_csrf_token_csrf_token_get"];
+ put?: never;
+ post?: never;
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/events": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Create Events
+ * @description Record a batch of Events
+ */
+ post: operations["create_events_events_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/events/filter": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Read Events
+ * @description Queries for Events matching the given filter criteria in the given Account. Returns
+ * the first page of results, and the URL to request the next page (if there are more
+ * results).
+ */
+ post: operations["read_events_events_filter_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/events/filter/next": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /**
+ * Read Account Events Page
+ * @description Returns the next page of Events for a previous query against the given Account, and
+ * the URL to request the next page (if there are more results).
+ */
+ get: operations["read_account_events_page_events_filter_next_get"];
+ put?: never;
+ post?: never;
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/events/count-by/{countable}": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Count Account Events
+ * @description Returns distinct objects and the count of events associated with them. Objects
+ * that can be counted include the day the event occurred, the type of event, or
+ * the IDs of the resources associated with the event.
+ */
+ post: operations["count_account_events_events_count_by__countable__post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/automations/": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /** Create Automation */
+ post: operations["create_automation_automations__post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/automations/{id}": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /** Read Automation */
+ get: operations["read_automation_automations__id__get"];
+ /** Update Automation */
+ put: operations["update_automation_automations__id__put"];
+ post?: never;
+ /** Delete Automation */
+ delete: operations["delete_automation_automations__id__delete"];
+ options?: never;
+ head?: never;
+ /** Patch Automation */
+ patch: operations["patch_automation_automations__id__patch"];
+ trace?: never;
+ };
+ "/automations/filter": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /** Read Automations */
+ post: operations["read_automations_automations_filter_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/automations/count": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /** Count Automations */
+ post: operations["count_automations_automations_count_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/automations/related-to/{resource_id}": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /** Read Automations Related To Resource */
+ get: operations["read_automations_related_to_resource_automations_related_to__resource_id__get"];
+ put?: never;
+ post?: never;
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/automations/owned-by/{resource_id}": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ post?: never;
+ /** Delete Automations Owned By Resource */
+ delete: operations["delete_automations_owned_by_resource_automations_owned_by__resource_id__delete"];
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/templates/validate": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /** Validate Template */
+ post: operations["validate_template_templates_validate_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/ui/flows/count-deployments": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Count Deployments By Flow
+ * @description Get deployment counts by flow id.
+ */
+ post: operations["count_deployments_by_flow_ui_flows_count_deployments_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/ui/flows/next-runs": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Next Runs By Flow
+ * @description Get the next flow run by flow id.
+ */
+ post: operations["next_runs_by_flow_ui_flows_next_runs_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/ui/flow_runs/history": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /** Read Flow Run History */
+ post: operations["read_flow_run_history_ui_flow_runs_history_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/ui/flow_runs/count-task-runs": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Count Task Runs By Flow Run
+ * @description Get task run counts by flow run id.
+ */
+ post: operations["count_task_runs_by_flow_run_ui_flow_runs_count_task_runs_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/ui/schemas/validate": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /** Validate Obj */
+ post: operations["validate_obj_ui_schemas_validate_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/ui/task_runs/dashboard/counts": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /** Read Dashboard Task Run Counts */
+ post: operations["read_dashboard_task_run_counts_ui_task_runs_dashboard_counts_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/ui/task_runs/count": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /** Read Task Run Counts By State */
+ post: operations["read_task_run_counts_by_state_ui_task_runs_count_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/admin/settings": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /**
+ * Read Settings
+ * @description Get the current Prefect REST API settings.
+ *
+ * Secret setting values will be obfuscated.
+ */
+ get: operations["read_settings_admin_settings_get"];
+ put?: never;
+ post?: never;
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/admin/version": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /**
+ * Read Version
+ * @description Returns the Prefect version number
+ */
+ get: operations["read_version_admin_version_get"];
+ put?: never;
+ post?: never;
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/admin/database/clear": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Clear Database
+ * @description Clear all database tables without dropping them.
+ */
+ post: operations["clear_database_admin_database_clear_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/admin/database/drop": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Drop Database
+ * @description Drop all database objects.
+ */
+ post: operations["drop_database_admin_database_drop_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/admin/database/create": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Create Database
+ * @description Create all database objects.
+ */
+ post: operations["create_database_admin_database_create_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/hello": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /**
+ * Hello
+ * @description Say hello!
+ */
+ get: operations["hello_hello_get"];
+ put?: never;
+ post?: never;
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/ready": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ /** Perform Readiness Check */
+ get: operations["perform_readiness_check_ready_get"];
+ put?: never;
+ post?: never;
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+}
+export type webhooks = Record<string, never>;
+export interface components {
+ schemas: {
+ /** Artifact */
+ Artifact: {
+ /**
+ * Id
+ * Format: uuid
+ */
+ id?: string;
+ /** Created */
+ created?: string | null;
+ /** Updated */
+ updated?: string | null;
+ /**
+ * Key
+ * @description An optional unique reference key for this artifact.
+ */
+ key?: string | null;
+ /**
+ * Type
+ * @description An identifier that describes the shape of the data field, e.g. 'result', 'table', 'markdown'
+ */
+ type?: string | null;
+ /**
+ * Description
+ * @description A markdown-enabled description of the artifact.
+ */
+ description?: string | null;
+ /**
+ * Data
+ * @description Data associated with the artifact, e.g. a result; structure depends on the artifact type.
+ */
+ data?: Record<string, never> | unknown | null;
+ /**
+ * Metadata
+ * @description User-defined artifact metadata. Content must be string key and value pairs.
+ */
+ metadata_?: {
+ [key: string]: string;
+ } | null;
+ /**
+ * Flow Run Id
+ * @description The flow run associated with the artifact.
+ */
+ flow_run_id?: string | null;
+ /**
+ * Task Run Id
+ * @description The task run associated with the artifact.
+ */
+ task_run_id?: string | null;
+ };
+ /** ArtifactCollection */
+ ArtifactCollection: {
+ /**
+ * Id
+ * Format: uuid
+ */
+ id?: string;
+ /** Created */
+ created?: string | null;
+ /** Updated */
+ updated?: string | null;
+ /**
+ * Key
+ * @description An optional unique reference key for this artifact.
+ */
+ key: string;
+ /**
+ * Latest Id
+ * Format: uuid
+ * @description The latest artifact ID associated with the key.
+ */
+ latest_id: string;
+ /**
+ * Type
+ * @description An identifier that describes the shape of the data field, e.g. 'result', 'table', 'markdown'
+ */
+ type?: string | null;
+ /**
+ * Description
+ * @description A markdown-enabled description of the artifact.
+ */
+ description?: string | null;
+ /**
+ * Data
+ * @description Data associated with the artifact, e.g. a result; structure depends on the artifact type.
+ */
+ data?: Record<string, never> | unknown | null;
+ /**
+ * Metadata
+ * @description User-defined artifact metadata. Content must be string key and value pairs.
+ */
+ metadata_?: {
+ [key: string]: string;
+ } | null;
+ /**
+ * Flow Run Id
+ * @description The flow run associated with the artifact.
+ */
+ flow_run_id?: string | null;
+ /**
+ * Task Run Id
+ * @description The task run associated with the artifact.
+ */
+ task_run_id?: string | null;
+ };
+ /**
+ * ArtifactCollectionFilter
+ * @description Filter artifact collections. Only artifact collections matching all criteria will be returned
+ */
+ ArtifactCollectionFilter: {
+ /**
+ * @description Operator for combining filter criteria. Defaults to 'and_'.
+ * @default and_
+ */
+ operator: components["schemas"]["Operator"];
+ /** @description Filter criteria for `Artifact.id` */
+ latest_id?:
+ | components["schemas"]["ArtifactCollectionFilterLatestId"]
+ | null;
+ /** @description Filter criteria for `Artifact.key` */
+ key?: components["schemas"]["ArtifactCollectionFilterKey"] | null;
+ /** @description Filter criteria for `Artifact.flow_run_id` */
+ flow_run_id?:
+ | components["schemas"]["ArtifactCollectionFilterFlowRunId"]
+ | null;
+ /** @description Filter criteria for `Artifact.task_run_id` */
+ task_run_id?:
+ | components["schemas"]["ArtifactCollectionFilterTaskRunId"]
+ | null;
+ /** @description Filter criteria for `Artifact.type` */
+ type?: components["schemas"]["ArtifactCollectionFilterType"] | null;
+ };
+ /**
+ * ArtifactCollectionFilterFlowRunId
+ * @description Filter by `ArtifactCollection.flow_run_id`.
+ */
+ ArtifactCollectionFilterFlowRunId: {
+ /**
+ * Any
+ * @description A list of flow run IDs to include
+ */
+ any_?: string[] | null;
+ };
+ /**
+ * ArtifactCollectionFilterKey
+ * @description Filter by `ArtifactCollection.key`.
+ */
+ ArtifactCollectionFilterKey: {
+ /**
+ * Any
+ * @description A list of artifact keys to include
+ */
+ any_?: string[] | null;
+ /**
+ * Like
+ * @description A string to match artifact keys against. This can include SQL wildcard characters like `%` and `_`.
+ */
+ like_?: string | null;
+ /**
+ * Exists
+ * @description If `true`, only include artifacts with a non-null key. If `false`, only include artifacts with a null key. Should return all rows in the ArtifactCollection table if specified.
+ */
+ exists_?: boolean | null;
+ };
+ /**
+ * ArtifactCollectionFilterLatestId
+ * @description Filter by `ArtifactCollection.latest_id`.
+ */
+ ArtifactCollectionFilterLatestId: {
+ /**
+ * Any
+ * @description A list of artifact ids to include
+ */
+ any_?: string[] | null;
+ };
+ /**
+ * ArtifactCollectionFilterTaskRunId
+ * @description Filter by `ArtifactCollection.task_run_id`.
+ */
+ ArtifactCollectionFilterTaskRunId: {
+ /**
+ * Any
+ * @description A list of task run IDs to include
+ */
+ any_?: string[] | null;
+ };
+ /**
+ * ArtifactCollectionFilterType
+ * @description Filter by `ArtifactCollection.type`.
+ */
+ ArtifactCollectionFilterType: {
+ /**
+ * Any
+ * @description A list of artifact types to include
+ */
+ any_?: string[] | null;
+ /**
+ * Not Any
+ * @description A list of artifact types to exclude
+ */
+ not_any_?: string[] | null;
+ };
+ /**
+ * ArtifactCollectionSort
+ * @description Defines artifact collection sorting options.
+ * @enum {string}
+ */
+ ArtifactCollectionSort:
+ | "CREATED_DESC"
+ | "UPDATED_DESC"
+ | "ID_DESC"
+ | "KEY_DESC"
+ | "KEY_ASC";
+ /**
+ * ArtifactCreate
+ * @description Data used by the Prefect REST API to create an artifact.
+ */
+ ArtifactCreate: {
+ /**
+ * Key
+ * @description An optional unique reference key for this artifact.
+ */
+ key?: string | null;
+ /**
+ * Type
+ * @description An identifier that describes the shape of the data field, e.g. 'result', 'table', 'markdown'.
+ */
+ type?: string | null;
+ /**
+ * Description
+ * @description A markdown-enabled description of the artifact.
+ */
+ description?: string | null;
+ /**
+ * Data
+ * @description Data associated with the artifact, e.g. a result; the structure depends on the artifact type.
+ */
+ data?: Record<string, never> | unknown | null;
+ /**
+ * Metadata
+ * @description User-defined artifact metadata. Content must be string key and value pairs.
+ */
+ metadata_?: {
+ [key: string]: string;
+ } | null;
+ /**
+ * Flow Run Id
+ * @description The flow run associated with the artifact.
+ */
+ flow_run_id?: string | null;
+ /**
+ * Task Run Id
+ * @description The task run associated with the artifact.
+ */
+ task_run_id?: string | null;
+ };
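+ /**
+ * Illustrative sketch (not generated output): a minimal `ArtifactCreate`
+ * body for a markdown artifact. The key, data, and flow run ID below are
+ * hypothetical values, assuming `data` accepts a plain string via the
+ * `unknown` branch of its union.
+ * @example
+ * const body: components["schemas"]["ArtifactCreate"] = {
+ *   key: "daily-report",
+ *   type: "markdown",
+ *   description: "Summary of the daily pipeline run",
+ *   data: "# All tasks completed",
+ *   flow_run_id: "00000000-0000-0000-0000-000000000000",
+ * };
+ */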
+ /**
+ * ArtifactFilter
+ * @description Filter artifacts. Only artifacts matching all criteria will be returned
+ */
+ ArtifactFilter: {
+ /**
+ * @description Operator for combining filter criteria. Defaults to 'and_'.
+ * @default and_
+ */
+ operator: components["schemas"]["Operator"];
+ /** @description Filter criteria for `Artifact.id` */
+ id?: components["schemas"]["ArtifactFilterId"] | null;
+ /** @description Filter criteria for `Artifact.key` */
+ key?: components["schemas"]["ArtifactFilterKey"] | null;
+ /** @description Filter criteria for `Artifact.flow_run_id` */
+ flow_run_id?: components["schemas"]["ArtifactFilterFlowRunId"] | null;
+ /** @description Filter criteria for `Artifact.task_run_id` */
+ task_run_id?: components["schemas"]["ArtifactFilterTaskRunId"] | null;
+ /** @description Filter criteria for `Artifact.type` */
+ type?: components["schemas"]["ArtifactFilterType"] | null;
+ };
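+ /**
+ * Illustrative sketch (not generated output): an `ArtifactFilter` that
+ * matches table artifacts whose keys start with "report-". This assumes
+ * `Operator` includes `"and_"`, its documented default; `like_` uses SQL
+ * wildcard semantics per the field descriptions above.
+ * @example
+ * const filter: components["schemas"]["ArtifactFilter"] = {
+ *   operator: "and_",
+ *   key: { like_: "report-%" },
+ *   type: { any_: ["table"] },
+ * };
+ */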
+ /**
+ * ArtifactFilterFlowRunId
+ * @description Filter by `Artifact.flow_run_id`.
+ */
+ ArtifactFilterFlowRunId: {
+ /**
+ * Any
+ * @description A list of flow run IDs to include
+ */
+ any_?: string[] | null;
+ };
+ /**
+ * ArtifactFilterId
+ * @description Filter by `Artifact.id`.
+ */
+ ArtifactFilterId: {
+ /**
+ * Any
+ * @description A list of artifact ids to include
+ */
+ any_?: string[] | null;
+ };
+ /**
+ * ArtifactFilterKey
+ * @description Filter by `Artifact.key`.
+ */
+ ArtifactFilterKey: {
+ /**
+ * Any
+ * @description A list of artifact keys to include
+ */
+ any_?: string[] | null;
+ /**
+ * Like
+ * @description A string to match artifact keys against. This can include SQL wildcard characters like `%` and `_`.
+ */
+ like_?: string | null;
+ /**
+ * Exists
+ * @description If `true`, only include artifacts with a non-null key. If `false`, only include artifacts with a null key.
+ */
+ exists_?: boolean | null;
+ };
+ /**
+ * ArtifactFilterTaskRunId
+ * @description Filter by `Artifact.task_run_id`.
+ */
+ ArtifactFilterTaskRunId: {
+ /**
+ * Any
+ * @description A list of task run IDs to include
+ */
+ any_?: string[] | null;
+ };
+ /**
+ * ArtifactFilterType
+ * @description Filter by `Artifact.type`.
+ */
+ ArtifactFilterType: {
+ /**
+ * Any
+ * @description A list of artifact types to include
+ */
+ any_?: string[] | null;
+ /**
+ * Not Any
+ * @description A list of artifact types to exclude
+ */
+ not_any_?: string[] | null;
+ };
+ /**
+ * ArtifactSort
+ * @description Defines artifact sorting options.
+ * @enum {string}
+ */
+ ArtifactSort:
+ | "CREATED_DESC"
+ | "UPDATED_DESC"
+ | "ID_DESC"
+ | "KEY_DESC"
+ | "KEY_ASC";
+ /**
+ * ArtifactUpdate
+ * @description Data used by the Prefect REST API to update an artifact.
+ */
+ ArtifactUpdate: {
+ /** Data */
+ data?: Record<string, never> | unknown | null;
+ /** Description */
+ description?: string | null;
+ /** Metadata */
+ metadata_?: {
+ [key: string]: string;
+ } | null;
+ };
+ /** Automation */
+ Automation: {
+ /**
+ * Name
+ * @description The name of this automation
+ */
+ name: string;
+ /**
+ * Description
+ * @description A longer description of this automation
+ * @default
+ */
+ description: string;
+ /**
+ * Enabled
+ * @description Whether this automation will be evaluated
+ * @default true
+ */
+ enabled: boolean;
+ /**
+ * Trigger
+ * @description The criteria for which events this Automation covers and how it will respond to the presence or absence of those events
+ */
+ trigger:
+ | components["schemas"]["EventTrigger"]
+ | components["schemas"]["CompoundTrigger-Output"]
+ | components["schemas"]["SequenceTrigger-Output"];
+ /**
+ * Actions
+ * @description The actions to perform when this Automation triggers
+ */
+ actions: (
+ | components["schemas"]["DoNothing"]
+ | components["schemas"]["RunDeployment"]
+ | components["schemas"]["PauseDeployment"]
+ | components["schemas"]["ResumeDeployment"]
+ | components["schemas"]["CancelFlowRun"]
+ | components["schemas"]["ChangeFlowRunState"]
+ | components["schemas"]["PauseWorkQueue"]
+ | components["schemas"]["ResumeWorkQueue"]
+ | components["schemas"]["SendNotification"]
+ | components["schemas"]["CallWebhook"]
+ | components["schemas"]["PauseAutomation"]
+ | components["schemas"]["ResumeAutomation"]
+ | components["schemas"]["SuspendFlowRun"]
+ | components["schemas"]["ResumeFlowRun"]
+ | components["schemas"]["PauseWorkPool"]
+ | components["schemas"]["ResumeWorkPool"]
+ )[];
+ /**
+ * Actions On Trigger
+ * @description The actions to perform when an Automation goes into a triggered state
+ */
+ actions_on_trigger?: (
+ | components["schemas"]["DoNothing"]
+ | components["schemas"]["RunDeployment"]
+ | components["schemas"]["PauseDeployment"]
+ | components["schemas"]["ResumeDeployment"]
+ | components["schemas"]["CancelFlowRun"]
+ | components["schemas"]["ChangeFlowRunState"]
+ | components["schemas"]["PauseWorkQueue"]
+ | components["schemas"]["ResumeWorkQueue"]
+ | components["schemas"]["SendNotification"]
+ | components["schemas"]["CallWebhook"]
+ | components["schemas"]["PauseAutomation"]
+ | components["schemas"]["ResumeAutomation"]
+ | components["schemas"]["SuspendFlowRun"]
+ | components["schemas"]["ResumeFlowRun"]
+ | components["schemas"]["PauseWorkPool"]
+ | components["schemas"]["ResumeWorkPool"]
+ )[];
+ /**
+ * Actions On Resolve
+ * @description The actions to perform when an Automation goes into a resolving state
+ */
+ actions_on_resolve?: (
+ | components["schemas"]["DoNothing"]
+ | components["schemas"]["RunDeployment"]
+ | components["schemas"]["PauseDeployment"]
+ | components["schemas"]["ResumeDeployment"]
+ | components["schemas"]["CancelFlowRun"]
+ | components["schemas"]["ChangeFlowRunState"]
+ | components["schemas"]["PauseWorkQueue"]
+ | components["schemas"]["ResumeWorkQueue"]
+ | components["schemas"]["SendNotification"]
+ | components["schemas"]["CallWebhook"]
+ | components["schemas"]["PauseAutomation"]
+ | components["schemas"]["ResumeAutomation"]
+ | components["schemas"]["SuspendFlowRun"]
+ | components["schemas"]["ResumeFlowRun"]
+ | components["schemas"]["PauseWorkPool"]
+ | components["schemas"]["ResumeWorkPool"]
+ )[];
+ /**
+ * Id
+ * Format: uuid
+ */
+ id?: string;
+ /** Created */
+ created?: string | null;
+ /** Updated */
+ updated?: string | null;
+ };
+ /** AutomationCreate */
+ AutomationCreate: {
+ /**
+ * Name
+ * @description The name of this automation
+ */
+ name: string;
+ /**
+ * Description
+ * @description A longer description of this automation
+ * @default
+ */
+ description: string;
+ /**
+ * Enabled
+ * @description Whether this automation will be evaluated
+ * @default true
+ */
+ enabled: boolean;
+ /**
+ * Trigger
+ * @description The criteria for which events this Automation covers and how it will respond to the presence or absence of those events
+ */
+ trigger:
+ | components["schemas"]["EventTrigger"]
+ | components["schemas"]["CompoundTrigger-Input"]
+ | components["schemas"]["SequenceTrigger-Input"];
+ /**
+ * Actions
+ * @description The actions to perform when this Automation triggers
+ */
+ actions: (
+ | components["schemas"]["DoNothing"]
+ | components["schemas"]["RunDeployment"]
+ | components["schemas"]["PauseDeployment"]
+ | components["schemas"]["ResumeDeployment"]
+ | components["schemas"]["CancelFlowRun"]
+ | components["schemas"]["ChangeFlowRunState"]
+ | components["schemas"]["PauseWorkQueue"]
+ | components["schemas"]["ResumeWorkQueue"]
+ | components["schemas"]["SendNotification"]
+ | components["schemas"]["CallWebhook"]
+ | components["schemas"]["PauseAutomation"]
+ | components["schemas"]["ResumeAutomation"]
+ | components["schemas"]["SuspendFlowRun"]
+ | components["schemas"]["ResumeFlowRun"]
+ | components["schemas"]["PauseWorkPool"]
+ | components["schemas"]["ResumeWorkPool"]
+ )[];
+ /**
+ * Actions On Trigger
+ * @description The actions to perform when an Automation goes into a triggered state
+ */
+ actions_on_trigger?: (
+ | components["schemas"]["DoNothing"]
+ | components["schemas"]["RunDeployment"]
+ | components["schemas"]["PauseDeployment"]
+ | components["schemas"]["ResumeDeployment"]
+ | components["schemas"]["CancelFlowRun"]
+ | components["schemas"]["ChangeFlowRunState"]
+ | components["schemas"]["PauseWorkQueue"]
+ | components["schemas"]["ResumeWorkQueue"]
+ | components["schemas"]["SendNotification"]
+ | components["schemas"]["CallWebhook"]
+ | components["schemas"]["PauseAutomation"]
+ | components["schemas"]["ResumeAutomation"]
+ | components["schemas"]["SuspendFlowRun"]
+ | components["schemas"]["ResumeFlowRun"]
+ | components["schemas"]["PauseWorkPool"]
+ | components["schemas"]["ResumeWorkPool"]
+ )[];
+ /**
+ * Actions On Resolve
+ * @description The actions to perform when an Automation goes into a resolving state
+ */
+ actions_on_resolve?: (
+ | components["schemas"]["DoNothing"]
+ | components["schemas"]["RunDeployment"]
+ | components["schemas"]["PauseDeployment"]
+ | components["schemas"]["ResumeDeployment"]
+ | components["schemas"]["CancelFlowRun"]
+ | components["schemas"]["ChangeFlowRunState"]
+ | components["schemas"]["PauseWorkQueue"]
+ | components["schemas"]["ResumeWorkQueue"]
+ | components["schemas"]["SendNotification"]
+ | components["schemas"]["CallWebhook"]
+ | components["schemas"]["PauseAutomation"]
+ | components["schemas"]["ResumeAutomation"]
+ | components["schemas"]["SuspendFlowRun"]
+ | components["schemas"]["ResumeFlowRun"]
+ | components["schemas"]["PauseWorkPool"]
+ | components["schemas"]["ResumeWorkPool"]
+ )[];
+ /**
+ * Owner Resource
+ * @description The resource to which this automation belongs
+ */
+ owner_resource?: string | null;
+ };
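+ /**
+ * Illustrative sketch (not generated output): an `AutomationCreate` body
+ * that cancels the associated flow run when triggered. `someEventTrigger`
+ * is a placeholder for a valid `EventTrigger` value, whose shape is
+ * defined elsewhere in this file.
+ * @example
+ * const automation: components["schemas"]["AutomationCreate"] = {
+ *   name: "cancel-stuck-runs",
+ *   description: "Cancel runs that emit a stuck event",
+ *   enabled: true,
+ *   trigger: someEventTrigger, // placeholder EventTrigger
+ *   actions: [{ type: "cancel-flow-run" }],
+ * };
+ */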
+ /** AutomationFilter */
+ AutomationFilter: {
+ /**
+ * @description Operator for combining filter criteria. Defaults to 'and_'.
+ * @default and_
+ */
+ operator: components["schemas"]["Operator"];
+ /** @description Filter criteria for `Automation.name` */
+ name?: components["schemas"]["AutomationFilterName"] | null;
+ /** @description Filter criteria for `Automation.created` */
+ created?: components["schemas"]["AutomationFilterCreated"] | null;
+ };
+ /**
+ * AutomationFilterCreated
+ * @description Filter by `Automation.created`.
+ */
+ AutomationFilterCreated: {
+ /**
+ * Before
+ * @description Only include automations created before this datetime
+ */
+ before_?: string | null;
+ };
+ /**
+ * AutomationFilterName
+ * @description Filter by `Automation.name`.
+ */
+ AutomationFilterName: {
+ /**
+ * Any
+ * @description Only include automations with names that match any of these strings
+ */
+ any_?: string[] | null;
+ };
+ /** AutomationPartialUpdate */
+ AutomationPartialUpdate: {
+ /**
+ * Enabled
+ * @description Whether this automation will be evaluated
+ * @default true
+ */
+ enabled: boolean;
+ };
+ /**
+ * AutomationSort
+ * @description Defines automation sorting options.
+ * @enum {string}
+ */
+ AutomationSort: "CREATED_DESC" | "UPDATED_DESC" | "NAME_ASC" | "NAME_DESC";
+ /** AutomationUpdate */
+ AutomationUpdate: {
+ /**
+ * Name
+ * @description The name of this automation
+ */
+ name: string;
+ /**
+ * Description
+ * @description A longer description of this automation
+ * @default
+ */
+ description: string;
+ /**
+ * Enabled
+ * @description Whether this automation will be evaluated
+ * @default true
+ */
+ enabled: boolean;
+ /**
+ * Trigger
+ * @description The criteria for which events this Automation covers and how it will respond to the presence or absence of those events
+ */
+ trigger:
+ | components["schemas"]["EventTrigger"]
+ | components["schemas"]["CompoundTrigger-Input"]
+ | components["schemas"]["SequenceTrigger-Input"];
+ /**
+ * Actions
+ * @description The actions to perform when this Automation triggers
+ */
+ actions: (
+ | components["schemas"]["DoNothing"]
+ | components["schemas"]["RunDeployment"]
+ | components["schemas"]["PauseDeployment"]
+ | components["schemas"]["ResumeDeployment"]
+ | components["schemas"]["CancelFlowRun"]
+ | components["schemas"]["ChangeFlowRunState"]
+ | components["schemas"]["PauseWorkQueue"]
+ | components["schemas"]["ResumeWorkQueue"]
+ | components["schemas"]["SendNotification"]
+ | components["schemas"]["CallWebhook"]
+ | components["schemas"]["PauseAutomation"]
+ | components["schemas"]["ResumeAutomation"]
+ | components["schemas"]["SuspendFlowRun"]
+ | components["schemas"]["ResumeFlowRun"]
+ | components["schemas"]["PauseWorkPool"]
+ | components["schemas"]["ResumeWorkPool"]
+ )[];
+ /**
+ * Actions On Trigger
+ * @description The actions to perform when an Automation goes into a triggered state
+ */
+ actions_on_trigger?: (
+ | components["schemas"]["DoNothing"]
+ | components["schemas"]["RunDeployment"]
+ | components["schemas"]["PauseDeployment"]
+ | components["schemas"]["ResumeDeployment"]
+ | components["schemas"]["CancelFlowRun"]
+ | components["schemas"]["ChangeFlowRunState"]
+ | components["schemas"]["PauseWorkQueue"]
+ | components["schemas"]["ResumeWorkQueue"]
+ | components["schemas"]["SendNotification"]
+ | components["schemas"]["CallWebhook"]
+ | components["schemas"]["PauseAutomation"]
+ | components["schemas"]["ResumeAutomation"]
+ | components["schemas"]["SuspendFlowRun"]
+ | components["schemas"]["ResumeFlowRun"]
+ | components["schemas"]["PauseWorkPool"]
+ | components["schemas"]["ResumeWorkPool"]
+ )[];
+ /**
+ * Actions On Resolve
+ * @description The actions to perform when an Automation goes into a resolving state
+ */
+ actions_on_resolve?: (
+ | components["schemas"]["DoNothing"]
+ | components["schemas"]["RunDeployment"]
+ | components["schemas"]["PauseDeployment"]
+ | components["schemas"]["ResumeDeployment"]
+ | components["schemas"]["CancelFlowRun"]
+ | components["schemas"]["ChangeFlowRunState"]
+ | components["schemas"]["PauseWorkQueue"]
+ | components["schemas"]["ResumeWorkQueue"]
+ | components["schemas"]["SendNotification"]
+ | components["schemas"]["CallWebhook"]
+ | components["schemas"]["PauseAutomation"]
+ | components["schemas"]["ResumeAutomation"]
+ | components["schemas"]["SuspendFlowRun"]
+ | components["schemas"]["ResumeFlowRun"]
+ | components["schemas"]["PauseWorkPool"]
+ | components["schemas"]["ResumeWorkPool"]
+ )[];
+ };
+ /**
+ * BlockDocument
+ * @description An ORM representation of a block document.
+ */
+ BlockDocument: {
+ /**
+ * Id
+ * Format: uuid
+ */
+ id?: string;
+ /** Created */
+ created?: string | null;
+ /** Updated */
+ updated?: string | null;
+ /**
+ * Name
+ * @description The block document's name. Not required for anonymous block documents.
+ */
+ name?: string | null;
+ /**
+ * Data
+ * @description The block document's data
+ */
+ data?: Record<string, never>;
+ /**
+ * Block Schema Id
+ * Format: uuid
+ * @description A block schema ID
+ */
+ block_schema_id: string;
+ /** @description The associated block schema */
+ block_schema?: components["schemas"]["BlockSchema"] | null;
+ /**
+ * Block Type Id
+ * Format: uuid
+ * @description A block type ID
+ */
+ block_type_id: string;
+ /**
+ * Block Type Name
+ * @description The associated block type's name
+ */
+ block_type_name?: string | null;
+ /** @description The associated block type */
+ block_type?: components["schemas"]["BlockType"] | null;
+ /**
+ * Block Document References
+ * @description Record of the block document's references
+ */
+ block_document_references?: {
+ [key: string]: Record<string, never>;
+ };
+ /**
+ * Is Anonymous
+ * @description Whether the block is anonymous (anonymous blocks are usually created by Prefect automatically)
+ * @default false
+ */
+ is_anonymous: boolean;
+ };
+ /**
+ * BlockDocumentCreate
+ * @description Data used by the Prefect REST API to create a block document.
+ */
+ BlockDocumentCreate: {
+ /**
+ * Name
+ * @description The block document's name. Not required for anonymous block documents.
+ */
+ name?: string | null;
+ /**
+ * Data
+ * @description The block document's data
+ */
+ data?: Record<string, never>;
+ /**
+ * Block Schema Id
+ * Format: uuid
+ * @description A block schema ID
+ */
+ block_schema_id: string;
+ /**
+ * Block Type Id
+ * Format: uuid
+ * @description A block type ID
+ */
+ block_type_id: string;
+ /**
+ * Is Anonymous
+ * @description Whether the block is anonymous (anonymous blocks are usually created by Prefect automatically)
+ * @default false
+ */
+ is_anonymous: boolean;
+ };
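+ /**
+ * Illustrative sketch (not generated output): a named, non-anonymous
+ * `BlockDocumentCreate` body. The UUIDs are placeholders for real block
+ * schema and block type IDs.
+ * @example
+ * const blockDocument: components["schemas"]["BlockDocumentCreate"] = {
+ *   name: "my-storage-credentials",
+ *   data: {},
+ *   block_schema_id: "11111111-1111-1111-1111-111111111111",
+ *   block_type_id: "22222222-2222-2222-2222-222222222222",
+ *   is_anonymous: false,
+ * };
+ */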
+ /**
+ * BlockDocumentFilter
+ * @description Filter BlockDocuments. Only BlockDocuments matching all criteria will be returned
+ */
+ BlockDocumentFilter: {
+ /**
+ * @description Operator for combining filter criteria. Defaults to 'and_'.
+ * @default and_
+ */
+ operator: components["schemas"]["Operator"];
+ /** @description Filter criteria for `BlockDocument.id` */
+ id?: components["schemas"]["BlockDocumentFilterId"] | null;
+ /**
+ * @description Filter criteria for `BlockDocument.is_anonymous`. Defaults to excluding anonymous blocks.
+ * @default {
+ * "eq_": false
+ * }
+ */
+ is_anonymous:
+ | components["schemas"]["BlockDocumentFilterIsAnonymous"]
+ | null;
+ /** @description Filter criteria for `BlockDocument.block_type_id` */
+ block_type_id?:
+ | components["schemas"]["BlockDocumentFilterBlockTypeId"]
+ | null;
+ /** @description Filter criteria for `BlockDocument.name` */
+ name?: components["schemas"]["BlockDocumentFilterName"] | null;
+ };
+ /**
+ * BlockDocumentFilterBlockTypeId
+ * @description Filter by `BlockDocument.block_type_id`.
+ */
+ BlockDocumentFilterBlockTypeId: {
+ /**
+ * Any
+ * @description A list of block type ids to include
+ */
+ any_?: string[] | null;
+ };
+ /**
+ * BlockDocumentFilterId
+ * @description Filter by `BlockDocument.id`.
+ */
+ BlockDocumentFilterId: {
+ /**
+ * Any
+ * @description A list of block ids to include
+ */
+ any_?: string[] | null;
+ };
+ /**
+ * BlockDocumentFilterIsAnonymous
+ * @description Filter by `BlockDocument.is_anonymous`.
+ */
+ BlockDocumentFilterIsAnonymous: {
+ /**
+ * Eq
+ * @description Filter block documents for only those that are or are not anonymous.
+ */
+ eq_?: boolean | null;
+ };
+ /**
+ * BlockDocumentFilterName
+ * @description Filter by `BlockDocument.name`.
+ */
+ BlockDocumentFilterName: {
+ /**
+ * Any
+ * @description A list of block names to include
+ */
+ any_?: string[] | null;
+ /**
+ * Like
+ * @description A string to match block names against. This can include SQL wildcard characters like `%` and `_`.
+ */
+ like_?: string | null;
+ };
+ /**
+ * BlockDocumentSort
+ * @description Defines block document sorting options.
+ * @enum {string}
+ */
+ BlockDocumentSort: "NAME_DESC" | "NAME_ASC" | "BLOCK_TYPE_AND_NAME_ASC";
+ /**
+ * BlockDocumentUpdate
+ * @description Data used by the Prefect REST API to update a block document.
+ */
+ BlockDocumentUpdate: {
+ /**
+ * Block Schema Id
+ * @description A block schema ID
+ */
+ block_schema_id?: string | null;
+ /**
+ * Data
+ * @description The block document's data
+ */
+ data?: Record<string, never>;
+ /**
+ * Merge Existing Data
+ * @default true
+ */
+ merge_existing_data: boolean;
+ };
+ /**
+ * BlockSchema
+ * @description An ORM representation of a block schema.
+ */
+ BlockSchema: {
+ /**
+ * Id
+ * Format: uuid
+ */
+ id?: string;
+ /** Created */
+ created?: string | null;
+ /** Updated */
+ updated?: string | null;
+ /**
+ * Checksum
+ * @description The block schema's unique checksum
+ */
+ checksum: string;
+ /**
+ * Fields
+ * @description The block schema's field schema
+ */
+ fields?: Record<string, never>;
+ /**
+ * Block Type Id
+ * @description A block type ID
+ */
+ block_type_id: string | null;
+ /** @description The associated block type */
+ block_type?: components["schemas"]["BlockType"] | null;
+ /**
+ * Capabilities
+ * @description A list of Block capabilities
+ */
+ capabilities?: string[];
+ /**
+ * Version
+ * @description Human readable identifier for the block schema
+ * @default non-versioned
+ */
+ version: string;
+ };
+ /**
+ * BlockSchemaCreate
+ * @description Data used by the Prefect REST API to create a block schema.
+ */
+ BlockSchemaCreate: {
+ /**
+ * Fields
+ * @description The block schema's field schema
+ */
+ fields?: Record<string, never>;
+ /**
+ * Block Type Id
+ * Format: uuid
+ * @description A block type ID
+ */
+ block_type_id: string;
+ /**
+ * Capabilities
+ * @description A list of Block capabilities
+ */
+ capabilities?: string[];
+ /**
+ * Version
+ * @description Human readable identifier for the block schema
+ * @default non-versioned
+ */
+ version: string;
+ };
+ /**
+ * BlockSchemaFilter
+ * @description Filter BlockSchemas
+ */
+ BlockSchemaFilter: {
+ /**
+ * @description Operator for combining filter criteria. Defaults to 'and_'.
+ * @default and_
+ */
+ operator: components["schemas"]["Operator"];
+ /** @description Filter criteria for `BlockSchema.block_type_id` */
+ block_type_id?:
+ | components["schemas"]["BlockSchemaFilterBlockTypeId"]
+ | null;
+ /** @description Filter criteria for `BlockSchema.capabilities` */
+ block_capabilities?:
+ | components["schemas"]["BlockSchemaFilterCapabilities"]
+ | null;
+ /** @description Filter criteria for `BlockSchema.id` */
+ id?: components["schemas"]["BlockSchemaFilterId"] | null;
+ /** @description Filter criteria for `BlockSchema.version` */
+ version?: components["schemas"]["BlockSchemaFilterVersion"] | null;
+ };
+ /**
+ * BlockSchemaFilterBlockTypeId
+ * @description Filter by `BlockSchema.block_type_id`.
+ */
+ BlockSchemaFilterBlockTypeId: {
+ /**
+ * Any
+ * @description A list of block type ids to include
+ */
+ any_?: string[] | null;
+ };
+ /**
+ * BlockSchemaFilterCapabilities
+ * @description Filter by `BlockSchema.capabilities`
+ */
+ BlockSchemaFilterCapabilities: {
+ /**
+ * All
+ * @description A list of block capabilities. Block entities will be returned only if an associated block schema has a superset of the defined capabilities.
+ */
+ all_?: string[] | null;
+ };
+ /**
+ * BlockSchemaFilterId
+ * @description Filter by `BlockSchema.id`
+ */
+ BlockSchemaFilterId: {
+ /**
+ * Any
+ * @description A list of IDs to include
+ */
+ any_?: string[] | null;
+ };
+ /**
+ * BlockSchemaFilterVersion
+ * @description Filter by `BlockSchema.version`
+ */
+ BlockSchemaFilterVersion: {
+ /**
+ * Any
+ * @description A list of block schema versions.
+ */
+ any_?: string[] | null;
+ };
+ /**
+ * BlockType
+ * @description An ORM representation of a block type
+ */
+ BlockType: {
+ /**
+ * Id
+ * Format: uuid
+ */
+ id?: string;
+ /** Created */
+ created?: string | null;
+ /** Updated */
+ updated?: string | null;
+ /**
+ * Name
+ * @description A block type's name
+ */
+ name: string;
+ /**
+ * Slug
+ * @description A block type's slug
+ */
+ slug: string;
+ /**
+ * Logo Url
+ * @description Web URL for the block type's logo
+ */
+ logo_url?: string | null;
+ /**
+ * Documentation Url
+ * @description Web URL for the block type's documentation
+ */
+ documentation_url?: string | null;
+ /**
+ * Description
+ * @description A short blurb about the corresponding block's intended use
+ */
+ description?: string | null;
+ /**
+ * Code Example
+ * @description A code snippet demonstrating use of the corresponding block
+ */
+ code_example?: string | null;
+ /**
+ * Is Protected
+ * @description Protected block types cannot be modified via API.
+ * @default false
+ */
+ is_protected: boolean;
+ };
+ /**
+ * BlockTypeCreate
+ * @description Data used by the Prefect REST API to create a block type.
+ */
+ BlockTypeCreate: {
+ /**
+ * Name
+ * @description A block type's name
+ */
+ name: string;
+ /**
+ * Slug
+ * @description A block type's slug
+ */
+ slug: string;
+ /**
+ * Logo Url
+ * @description Web URL for the block type's logo
+ */
+ logo_url?: string | null;
+ /**
+ * Documentation Url
+ * @description Web URL for the block type's documentation
+ */
+ documentation_url?: string | null;
+ /**
+ * Description
+ * @description A short blurb about the corresponding block's intended use
+ */
+ description?: string | null;
+ /**
+ * Code Example
+ * @description A code snippet demonstrating use of the corresponding block
+ */
+ code_example?: string | null;
+ };
+ /**
+ * BlockTypeFilter
+ * @description Filter BlockTypes
+ */
+ BlockTypeFilter: {
+ /** @description Filter criteria for `BlockType.name` */
+ name?: components["schemas"]["BlockTypeFilterName"] | null;
+ /** @description Filter criteria for `BlockType.slug` */
+ slug?: components["schemas"]["BlockTypeFilterSlug"] | null;
+ };
+ /**
+ * BlockTypeFilterName
+ * @description Filter by `BlockType.name`
+ */
+ BlockTypeFilterName: {
+ /**
+ * Like
+ * @description A case-insensitive partial match. For example, passing 'marvin' will match 'marvin', 'sad-Marvin', and 'marvin-robot'.
+ */
+ like_?: string | null;
+ };
+ /**
+ * BlockTypeFilterSlug
+ * @description Filter by `BlockType.slug`
+ */
+ BlockTypeFilterSlug: {
+ /**
+ * Any
+ * @description A list of slugs to match
+ */
+ any_?: string[] | null;
+ };
+ /**
+ * BlockTypeUpdate
+ * @description Data used by the Prefect REST API to update a block type.
+ */
+ BlockTypeUpdate: {
+ /** Logo Url */
+ logo_url?: string | null;
+ /** Documentation Url */
+ documentation_url?: string | null;
+ /** Description */
+ description?: string | null;
+ /** Code Example */
+ code_example?: string | null;
+ };
+ /** Body_average_flow_run_lateness_flow_runs_lateness_post */
+ Body_average_flow_run_lateness_flow_runs_lateness_post: {
+ flows?: components["schemas"]["FlowFilter"] | null;
+ flow_runs?: components["schemas"]["FlowRunFilter"] | null;
+ task_runs?: components["schemas"]["TaskRunFilter"] | null;
+ deployments?: components["schemas"]["DeploymentFilter"] | null;
+ work_pools?: components["schemas"]["WorkPoolFilter"] | null;
+ work_pool_queues?: components["schemas"]["WorkQueueFilter"] | null;
+ };
+ /** Body_bulk_decrement_active_slots_v2_concurrency_limits_decrement_post */
+ Body_bulk_decrement_active_slots_v2_concurrency_limits_decrement_post: {
+ /** Slots */
+ slots: number;
+ /** Names */
+ names: string[];
+ /** Occupancy Seconds */
+ occupancy_seconds?: number | null;
+ /**
+ * Create If Missing
+ * @default true
+ */
+ create_if_missing: boolean;
+ };
+ /** Body_bulk_increment_active_slots_v2_concurrency_limits_increment_post */
+ Body_bulk_increment_active_slots_v2_concurrency_limits_increment_post: {
+ /** Slots */
+ slots: number;
+ /** Names */
+ names: string[];
+ /**
+ * Mode
+ * @default concurrency
+ * @enum {string}
+ */
+ mode: "concurrency" | "rate_limit";
+ /** Create If Missing */
+ create_if_missing?: boolean | null;
+ };
+ /** Body_clear_database_admin_database_clear_post */
+ Body_clear_database_admin_database_clear_post: {
+ /**
+ * Confirm
+ * @description Pass confirm=True to confirm you want to modify the database.
+ * @default false
+ */
+ confirm: boolean;
+ };
+ /** Body_count_account_events_events_count_by__countable__post */
+ Body_count_account_events_events_count_by__countable__post: {
+ filter: components["schemas"]["EventFilter"];
+ /** @default day */
+ time_unit: components["schemas"]["TimeUnit"];
+ /**
+ * Time Interval
+ * @default 1
+ */
+ time_interval: number;
+ };
+ /** Body_count_artifacts_artifacts_count_post */
+ Body_count_artifacts_artifacts_count_post: {
+ artifacts?: components["schemas"]["ArtifactFilter"];
+ flow_runs?: components["schemas"]["FlowRunFilter"];
+ task_runs?: components["schemas"]["TaskRunFilter"];
+ flows?: components["schemas"]["FlowFilter"];
+ deployments?: components["schemas"]["DeploymentFilter"];
+ };
+ /** Body_count_block_documents_block_documents_count_post */
+ Body_count_block_documents_block_documents_count_post: {
+ block_documents?: components["schemas"]["BlockDocumentFilter"] | null;
+ block_types?: components["schemas"]["BlockTypeFilter"] | null;
+ block_schemas?: components["schemas"]["BlockSchemaFilter"] | null;
+ };
+ /** Body_count_deployments_by_flow_ui_flows_count_deployments_post */
+ Body_count_deployments_by_flow_ui_flows_count_deployments_post: {
+ /** Flow Ids */
+ flow_ids: string[];
+ };
+ /** Body_count_deployments_deployments_count_post */
+ Body_count_deployments_deployments_count_post: {
+ flows?: components["schemas"]["FlowFilter"];
+ flow_runs?: components["schemas"]["FlowRunFilter"];
+ task_runs?: components["schemas"]["TaskRunFilter"];
+ deployments?: components["schemas"]["DeploymentFilter"];
+ work_pools?: components["schemas"]["WorkPoolFilter"];
+ work_pool_queues?: components["schemas"]["WorkQueueFilter"];
+ };
+ /** Body_count_flow_runs_flow_runs_count_post */
+ Body_count_flow_runs_flow_runs_count_post: {
+ flows?: components["schemas"]["FlowFilter"];
+ flow_runs?: components["schemas"]["FlowRunFilter"];
+ task_runs?: components["schemas"]["TaskRunFilter"];
+ deployments?: components["schemas"]["DeploymentFilter"];
+ work_pools?: components["schemas"]["WorkPoolFilter"];
+ work_pool_queues?: components["schemas"]["WorkQueueFilter"];
+ };
+ /** Body_count_flows_flows_count_post */
+ Body_count_flows_flows_count_post: {
+ flows?: components["schemas"]["FlowFilter"];
+ flow_runs?: components["schemas"]["FlowRunFilter"];
+ task_runs?: components["schemas"]["TaskRunFilter"];
+ deployments?: components["schemas"]["DeploymentFilter"];
+ work_pools?: components["schemas"]["WorkPoolFilter"];
+ };
+ /** Body_count_latest_artifacts_artifacts_latest_count_post */
+ Body_count_latest_artifacts_artifacts_latest_count_post: {
+ artifacts?: components["schemas"]["ArtifactCollectionFilter"];
+ flow_runs?: components["schemas"]["FlowRunFilter"];
+ task_runs?: components["schemas"]["TaskRunFilter"];
+ flows?: components["schemas"]["FlowFilter"];
+ deployments?: components["schemas"]["DeploymentFilter"];
+ };
+ /** Body_count_task_runs_by_flow_run_ui_flow_runs_count_task_runs_post */
+ Body_count_task_runs_by_flow_run_ui_flow_runs_count_task_runs_post: {
+ /** Flow Run Ids */
+ flow_run_ids: string[];
+ };
+ /** Body_count_task_runs_task_runs_count_post */
+ Body_count_task_runs_task_runs_count_post: {
+ flows?: components["schemas"]["FlowFilter"];
+ flow_runs?: components["schemas"]["FlowRunFilter"];
+ task_runs?: components["schemas"]["TaskRunFilter"];
+ deployments?: components["schemas"]["DeploymentFilter"];
+ };
+ /** Body_count_variables_variables_count_post */
+ Body_count_variables_variables_count_post: {
+ variables?: components["schemas"]["VariableFilter"] | null;
+ };
+ /** Body_count_work_pools_work_pools_count_post */
+ Body_count_work_pools_work_pools_count_post: {
+ work_pools?: components["schemas"]["WorkPoolFilter"] | null;
+ };
+ /** Body_create_database_admin_database_create_post */
+ Body_create_database_admin_database_create_post: {
+ /**
+ * Confirm
+ * @description Pass confirm=True to confirm you want to modify the database.
+ * @default false
+ */
+ confirm: boolean;
+ };
+ /** Body_create_flow_run_input_flow_runs__id__input_post */
+ Body_create_flow_run_input_flow_runs__id__input_post: {
+ /**
+ * Key
+ * @description The input key
+ */
+ key: string;
+ /**
+ * Value
+ * Format: binary
+ * @description The value of the input
+ */
+ value: string;
+ /**
+ * Sender
+ * @description The sender of the input
+ */
+ sender?: string | null;
+ };
+ /** Body_decrement_concurrency_limits_v1_concurrency_limits_decrement_post */
+ Body_decrement_concurrency_limits_v1_concurrency_limits_decrement_post: {
+ /**
+ * Names
+ * @description The tags to release a slot for
+ */
+ names: string[];
+ /**
+ * Task Run Id
+ * Format: uuid
+ * @description The ID of the task run releasing the slot
+ */
+ task_run_id: string;
+ };
+ /** Body_drop_database_admin_database_drop_post */
+ Body_drop_database_admin_database_drop_post: {
+ /**
+ * Confirm
+ * @description Pass confirm=True to confirm you want to modify the database.
+ * @default false
+ */
+ confirm: boolean;
+ };
+ /** Body_filter_flow_run_input_flow_runs__id__input_filter_post */
+ Body_filter_flow_run_input_flow_runs__id__input_filter_post: {
+ /**
+ * Prefix
+ * @description The input key prefix
+ */
+ prefix: string;
+ /**
+ * Limit
+ * @description The maximum number of results to return
+ * @default 1
+ */
+ limit: number;
+ /**
+ * Exclude Keys
+ * @description Exclude inputs with these keys
+ * @default []
+ */
+ exclude_keys: string[];
+ };
+ /** Body_flow_run_history_flow_runs_history_post */
+ Body_flow_run_history_flow_runs_history_post: {
+ /**
+ * History Start
+ * Format: date-time
+ * @description The history's start time.
+ */
+ history_start: string;
+ /**
+ * History End
+ * Format: date-time
+ * @description The history's end time.
+ */
+ history_end: string;
+ /**
+ * History Interval
+ * Format: time-delta
+ * @description The size of each history interval, in seconds. Must be at least 1 second.
+ */
+ history_interval: number;
+ flows?: components["schemas"]["FlowFilter"];
+ flow_runs?: components["schemas"]["FlowRunFilter"];
+ task_runs?: components["schemas"]["TaskRunFilter"];
+ deployments?: components["schemas"]["DeploymentFilter"];
+ work_pools?: components["schemas"]["WorkPoolFilter"];
+ work_queues?: components["schemas"]["WorkQueueFilter"];
+ };
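+ /**
+ * Illustrative sketch (not generated output): one day of flow run history
+ * in hourly buckets. Note that `history_interval` is a number of seconds
+ * (format: time-delta), not an ISO 8601 duration string.
+ * @example
+ * const body: components["schemas"]["Body_flow_run_history_flow_runs_history_post"] = {
+ *   history_start: "2024-01-01T00:00:00Z",
+ *   history_end: "2024-01-02T00:00:00Z",
+ *   history_interval: 3600, // one-hour buckets, in seconds
+ * };
+ */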
+ /** Body_get_scheduled_flow_runs_for_deployments_deployments_get_scheduled_flow_runs_post */
+ Body_get_scheduled_flow_runs_for_deployments_deployments_get_scheduled_flow_runs_post: {
+ /**
+ * Deployment Ids
+ * @description The deployment IDs to get scheduled runs for
+ */
+ deployment_ids: string[];
+ /**
+ * Scheduled Before
+ * Format: date-time
+ * @description The maximum time to look for scheduled flow runs
+ */
+ scheduled_before?: string;
+ /**
+ * Limit
+ * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided.
+ */
+ limit?: number;
+ };
+ /** Body_get_scheduled_flow_runs_work_pools__name__get_scheduled_flow_runs_post */
+ Body_get_scheduled_flow_runs_work_pools__name__get_scheduled_flow_runs_post: {
+ /**
+ * Work Queue Names
+ * @description The names of work pool queues
+ */
+ work_queue_names?: string[];
+ /**
+ * Scheduled Before
+ * Format: date-time
+ * @description The maximum time to look for scheduled flow runs
+ */
+ scheduled_before?: string;
+ /**
+ * Scheduled After
+ * Format: date-time
+ * @description The minimum time to look for scheduled flow runs
+ */
+ scheduled_after?: string;
+ /**
+ * Limit
+ * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided.
+ */
+ limit?: number;
+ };
+ /** Body_increment_concurrency_limits_v1_concurrency_limits_increment_post */
+ Body_increment_concurrency_limits_v1_concurrency_limits_increment_post: {
+ /**
+ * Names
+ * @description The tags to acquire a slot for
+ */
+ names: string[];
+ /**
+ * Task Run Id
+ * Format: uuid
+ * @description The ID of the task run acquiring the slot
+ */
+ task_run_id: string;
+ };
+ /** Body_next_runs_by_flow_ui_flows_next_runs_post */
+ Body_next_runs_by_flow_ui_flows_next_runs_post: {
+ /** Flow Ids */
+ flow_ids: string[];
+ };
+ /** Body_paginate_deployments_deployments_paginate_post */
+ Body_paginate_deployments_deployments_paginate_post: {
+ /**
+ * Page
+ * @default 1
+ */
+ page: number;
+ flows?: components["schemas"]["FlowFilter"];
+ flow_runs?: components["schemas"]["FlowRunFilter"];
+ task_runs?: components["schemas"]["TaskRunFilter"];
+ deployments?: components["schemas"]["DeploymentFilter"];
+ work_pools?: components["schemas"]["WorkPoolFilter"];
+ work_pool_queues?: components["schemas"]["WorkQueueFilter"];
+ /** @default NAME_ASC */
+ sort: components["schemas"]["DeploymentSort"];
+ /**
+ * Limit
+ * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided.
+ */
+ limit?: number;
+ };
+ /** Body_paginate_flow_runs_flow_runs_paginate_post */
+ Body_paginate_flow_runs_flow_runs_paginate_post: {
+ /** @default ID_DESC */
+ sort: components["schemas"]["FlowRunSort"];
+ /**
+ * Page
+ * @default 1
+ */
+ page: number;
+ flows?: components["schemas"]["FlowFilter"] | null;
+ flow_runs?: components["schemas"]["FlowRunFilter"] | null;
+ task_runs?: components["schemas"]["TaskRunFilter"] | null;
+ deployments?: components["schemas"]["DeploymentFilter"] | null;
+ work_pools?: components["schemas"]["WorkPoolFilter"] | null;
+ work_pool_queues?: components["schemas"]["WorkQueueFilter"] | null;
+ /**
+ * Limit
+ * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided.
+ */
+ limit?: number;
+ };
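+ /**
+ * Illustrative sketch (not generated output): requesting the second page
+ * of flow runs, fifty per page, newest expected start first. When `limit`
+ * is omitted the server falls back to PREFECT_API_DEFAULT_LIMIT.
+ * @example
+ * const body: components["schemas"]["Body_paginate_flow_runs_flow_runs_paginate_post"] = {
+ *   sort: "EXPECTED_START_TIME_DESC",
+ *   page: 2,
+ *   limit: 50,
+ * };
+ */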
+ /** Body_paginate_flows_flows_paginate_post */
+ Body_paginate_flows_flows_paginate_post: {
+ /**
+ * Page
+ * @default 1
+ */
+ page: number;
+ flows?: components["schemas"]["FlowFilter"] | null;
+ flow_runs?: components["schemas"]["FlowRunFilter"] | null;
+ task_runs?: components["schemas"]["TaskRunFilter"] | null;
+ deployments?: components["schemas"]["DeploymentFilter"] | null;
+ work_pools?: components["schemas"]["WorkPoolFilter"] | null;
+ /** @default NAME_ASC */
+ sort: components["schemas"]["FlowSort"];
+ /**
+ * Limit
+ * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided.
+ */
+ limit?: number;
+ };
+ /** Body_read_all_concurrency_limits_v2_v2_concurrency_limits_filter_post */
+ Body_read_all_concurrency_limits_v2_v2_concurrency_limits_filter_post: {
+ /**
+ * Offset
+ * @default 0
+ */
+ offset: number;
+ /**
+ * Limit
+ * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided.
+ */
+ limit?: number;
+ };
+ /** Body_read_artifacts_artifacts_filter_post */
+ Body_read_artifacts_artifacts_filter_post: {
+ /** @default ID_DESC */
+ sort: components["schemas"]["ArtifactSort"];
+ /**
+ * Offset
+ * @default 0
+ */
+ offset: number;
+ artifacts?: components["schemas"]["ArtifactFilter"];
+ flow_runs?: components["schemas"]["FlowRunFilter"];
+ task_runs?: components["schemas"]["TaskRunFilter"];
+ flows?: components["schemas"]["FlowFilter"];
+ deployments?: components["schemas"]["DeploymentFilter"];
+ /**
+ * Limit
+ * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided.
+ */
+ limit?: number;
+ };
+ /** Body_read_automations_automations_filter_post */
+ Body_read_automations_automations_filter_post: {
+ /** @default NAME_ASC */
+ sort: components["schemas"]["AutomationSort"];
+ /**
+ * Offset
+ * @default 0
+ */
+ offset: number;
+ automations?: components["schemas"]["AutomationFilter"] | null;
+ /**
+ * Limit
+ * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided.
+ */
+ limit?: number;
+ };
+ /** Body_read_block_documents_block_documents_filter_post */
+ Body_read_block_documents_block_documents_filter_post: {
+ block_documents?: components["schemas"]["BlockDocumentFilter"] | null;
+ block_types?: components["schemas"]["BlockTypeFilter"] | null;
+ block_schemas?: components["schemas"]["BlockSchemaFilter"] | null;
+ /**
+ * Include Secrets
+ * @description Whether to include sensitive values in the block document.
+ * @default false
+ */
+ include_secrets: boolean;
+ /** @default NAME_ASC */
+ sort: components["schemas"]["BlockDocumentSort"] | null;
+ /**
+ * Offset
+ * @default 0
+ */
+ offset: number;
+ /**
+ * Limit
+ * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided.
+ */
+ limit?: number;
+ };
+ /** Body_read_block_schemas_block_schemas_filter_post */
+ Body_read_block_schemas_block_schemas_filter_post: {
+ block_schemas?: components["schemas"]["BlockSchemaFilter"] | null;
+ /**
+ * Offset
+ * @default 0
+ */
+ offset: number;
+ /**
+ * Limit
+ * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided.
+ */
+ limit?: number;
+ };
+ /** Body_read_block_types_block_types_filter_post */
+ Body_read_block_types_block_types_filter_post: {
+ block_types?: components["schemas"]["BlockTypeFilter"] | null;
+ block_schemas?: components["schemas"]["BlockSchemaFilter"] | null;
+ /**
+ * Offset
+ * @default 0
+ */
+ offset: number;
+ /**
+ * Limit
+ * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided.
+ */
+ limit?: number;
+ };
+ /** Body_read_concurrency_limits_concurrency_limits_filter_post */
+ Body_read_concurrency_limits_concurrency_limits_filter_post: {
+ /**
+ * Offset
+ * @default 0
+ */
+ offset: number;
+ /**
+ * Limit
+ * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided.
+ */
+ limit?: number;
+ };
+ /** Body_read_dashboard_task_run_counts_ui_task_runs_dashboard_counts_post */
+ Body_read_dashboard_task_run_counts_ui_task_runs_dashboard_counts_post: {
+ task_runs: components["schemas"]["TaskRunFilter"];
+ flows?: components["schemas"]["FlowFilter"] | null;
+ flow_runs?: components["schemas"]["FlowRunFilter"] | null;
+ deployments?: components["schemas"]["DeploymentFilter"] | null;
+ work_pools?: components["schemas"]["WorkPoolFilter"] | null;
+ work_queues?: components["schemas"]["WorkQueueFilter"] | null;
+ };
+ /** Body_read_deployments_deployments_filter_post */
+ Body_read_deployments_deployments_filter_post: {
+ /**
+ * Offset
+ * @default 0
+ */
+ offset: number;
+ flows?: components["schemas"]["FlowFilter"];
+ flow_runs?: components["schemas"]["FlowRunFilter"];
+ task_runs?: components["schemas"]["TaskRunFilter"];
+ deployments?: components["schemas"]["DeploymentFilter"];
+ work_pools?: components["schemas"]["WorkPoolFilter"];
+ work_pool_queues?: components["schemas"]["WorkQueueFilter"];
+ /** @default NAME_ASC */
+ sort: components["schemas"]["DeploymentSort"];
+ /**
+ * Limit
+ * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided.
+ */
+ limit?: number;
+ };
+ /** Body_read_events_events_filter_post */
+ Body_read_events_events_filter_post: {
+ /** @description Additional optional filter criteria to narrow down the set of Events */
+ filter?: components["schemas"]["EventFilter"] | null;
+ /**
+ * Limit
+ * @description The number of events to return with each page
+ * @default 50
+ */
+ limit: number;
+ };
+ /** Body_read_flow_run_history_ui_flow_runs_history_post */
+ Body_read_flow_run_history_ui_flow_runs_history_post: {
+ /** @default EXPECTED_START_TIME_DESC */
+ sort: components["schemas"]["FlowRunSort"];
+ /**
+ * Limit
+ * @default 1000
+ */
+ limit: number;
+ /**
+ * Offset
+ * @default 0
+ */
+ offset: number;
+ flows?: components["schemas"]["FlowFilter"];
+ flow_runs?: components["schemas"]["FlowRunFilter"];
+ task_runs?: components["schemas"]["TaskRunFilter"];
+ deployments?: components["schemas"]["DeploymentFilter"];
+ work_pools?: components["schemas"]["WorkPoolFilter"];
+ };
+ /** Body_read_flow_run_notification_policies_flow_run_notification_policies_filter_post */
+ Body_read_flow_run_notification_policies_flow_run_notification_policies_filter_post: {
+ flow_run_notification_policy_filter?: components["schemas"]["FlowRunNotificationPolicyFilter"];
+ /**
+ * Offset
+ * @default 0
+ */
+ offset: number;
+ /**
+ * Limit
+ * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided.
+ */
+ limit?: number;
+ };
+ /** Body_read_flow_runs_flow_runs_filter_post */
+ Body_read_flow_runs_flow_runs_filter_post: {
+ /** @default ID_DESC */
+ sort: components["schemas"]["FlowRunSort"];
+ /**
+ * Offset
+ * @default 0
+ */
+ offset: number;
+ flows?: components["schemas"]["FlowFilter"] | null;
+ flow_runs?: components["schemas"]["FlowRunFilter"] | null;
+ task_runs?: components["schemas"]["TaskRunFilter"] | null;
+ deployments?: components["schemas"]["DeploymentFilter"] | null;
+ work_pools?: components["schemas"]["WorkPoolFilter"] | null;
+ work_pool_queues?: components["schemas"]["WorkQueueFilter"] | null;
+ /**
+ * Limit
+ * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided.
+ */
+ limit?: number;
+ };
+ /** Body_read_flows_flows_filter_post */
+ Body_read_flows_flows_filter_post: {
+ /**
+ * Offset
+ * @default 0
+ */
+ offset: number;
+ flows?: components["schemas"]["FlowFilter"];
+ flow_runs?: components["schemas"]["FlowRunFilter"];
+ task_runs?: components["schemas"]["TaskRunFilter"];
+ deployments?: components["schemas"]["DeploymentFilter"];
+ work_pools?: components["schemas"]["WorkPoolFilter"];
+ /** @default NAME_ASC */
+ sort: components["schemas"]["FlowSort"];
+ /**
+ * Limit
+ * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided.
+ */
+ limit?: number;
+ };
+ /** Body_read_latest_artifacts_artifacts_latest_filter_post */
+ Body_read_latest_artifacts_artifacts_latest_filter_post: {
+ /** @default ID_DESC */
+ sort: components["schemas"]["ArtifactCollectionSort"];
+ /**
+ * Offset
+ * @default 0
+ */
+ offset: number;
+ artifacts?: components["schemas"]["ArtifactCollectionFilter"];
+ flow_runs?: components["schemas"]["FlowRunFilter"];
+ task_runs?: components["schemas"]["TaskRunFilter"];
+ flows?: components["schemas"]["FlowFilter"];
+ deployments?: components["schemas"]["DeploymentFilter"];
+ /**
+ * Limit
+ * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided.
+ */
+ limit?: number;
+ };
+ /** Body_read_logs_logs_filter_post */
+ Body_read_logs_logs_filter_post: {
+ /**
+ * Offset
+ * @default 0
+ */
+ offset: number;
+ logs?: components["schemas"]["LogFilter"];
+ /** @default TIMESTAMP_ASC */
+ sort: components["schemas"]["LogSort"];
+ /**
+ * Limit
+ * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided.
+ */
+ limit?: number;
+ };
+ /** Body_read_saved_searches_saved_searches_filter_post */
+ Body_read_saved_searches_saved_searches_filter_post: {
+ /**
+ * Offset
+ * @default 0
+ */
+ offset: number;
+ /**
+ * Limit
+ * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided.
+ */
+ limit?: number;
+ };
+ /** Body_read_task_run_counts_by_state_ui_task_runs_count_post */
+ Body_read_task_run_counts_by_state_ui_task_runs_count_post: {
+ flows?: components["schemas"]["FlowFilter"] | null;
+ flow_runs?: components["schemas"]["FlowRunFilter"] | null;
+ task_runs?: components["schemas"]["TaskRunFilter"] | null;
+ deployments?: components["schemas"]["DeploymentFilter"] | null;
+ };
+ /** Body_read_task_runs_task_runs_filter_post */
+ Body_read_task_runs_task_runs_filter_post: {
+ /** @default ID_DESC */
+ sort: components["schemas"]["TaskRunSort"];
+ /**
+ * Offset
+ * @default 0
+ */
+ offset: number;
+ flows?: components["schemas"]["FlowFilter"] | null;
+ flow_runs?: components["schemas"]["FlowRunFilter"] | null;
+ task_runs?: components["schemas"]["TaskRunFilter"] | null;
+ deployments?: components["schemas"]["DeploymentFilter"] | null;
+ /**
+ * Limit
+ * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided.
+ */
+ limit?: number;
+ };
+ /** Body_read_task_workers_task_workers_filter_post */
+ Body_read_task_workers_task_workers_filter_post: {
+ /** @description The task worker filter */
+ task_worker_filter?: components["schemas"]["TaskWorkerFilter"] | null;
+ };
+ /** Body_read_variables_variables_filter_post */
+ Body_read_variables_variables_filter_post: {
+ /**
+ * Offset
+ * @default 0
+ */
+ offset: number;
+ variables?: components["schemas"]["VariableFilter"] | null;
+ /** @default NAME_ASC */
+ sort: components["schemas"]["VariableSort"];
+ /**
+ * Limit
+ * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided.
+ */
+ limit?: number;
+ };
+ /** Body_read_work_pools_work_pools_filter_post */
+ Body_read_work_pools_work_pools_filter_post: {
+ work_pools?: components["schemas"]["WorkPoolFilter"] | null;
+ /**
+ * Offset
+ * @default 0
+ */
+ offset: number;
+ /**
+ * Limit
+ * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided.
+ */
+ limit?: number;
+ };
+ /** Body_read_work_queue_runs_work_queues__id__get_runs_post */
+ Body_read_work_queue_runs_work_queues__id__get_runs_post: {
+ /**
+ * Scheduled Before
+ * Format: date-time
+ * @description Only flow runs scheduled to start before this time will be returned.
+ */
+ scheduled_before?: string;
+ /**
+ * Agent Id
+ * @description An optional unique identifier for the agent making this query. If provided, the Prefect REST API will track the last time this agent polled the work queue.
+ */
+ agent_id?: string | null;
+ /**
+ * Limit
+ * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided.
+ */
+ limit?: number;
+ };
+ /** Body_read_work_queues_work_pools__work_pool_name__queues_filter_post */
+ Body_read_work_queues_work_pools__work_pool_name__queues_filter_post: {
+ work_queues?: components["schemas"]["WorkQueueFilter"];
+ /**
+ * Offset
+ * @default 0
+ */
+ offset: number;
+ /**
+ * Limit
+ * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided.
+ */
+ limit?: number;
+ };
+ /** Body_read_work_queues_work_queues_filter_post */
+ Body_read_work_queues_work_queues_filter_post: {
+ /**
+ * Offset
+ * @default 0
+ */
+ offset: number;
+ work_queues?: components["schemas"]["WorkQueueFilter"];
+ /**
+ * Limit
+ * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided.
+ */
+ limit?: number;
+ };
+ /** Body_read_workers_work_pools__work_pool_name__workers_filter_post */
+ Body_read_workers_work_pools__work_pool_name__workers_filter_post: {
+ workers?: components["schemas"]["WorkerFilter"] | null;
+ /**
+ * Offset
+ * @default 0
+ */
+ offset: number;
+ /**
+ * Limit
+ * @description Defaults to PREFECT_API_DEFAULT_LIMIT if not provided.
+ */
+ limit?: number;
+ };
+ /** Body_reset_concurrency_limit_by_tag_concurrency_limits_tag__tag__reset_post */
+ Body_reset_concurrency_limit_by_tag_concurrency_limits_tag__tag__reset_post: {
+ /**
+ * Slot Override
+ * @description Manual override for active concurrency limit slots.
+ */
+ slot_override?: string[] | null;
+ };
+ /** Body_resume_flow_run_flow_runs__id__resume_post */
+ Body_resume_flow_run_flow_runs__id__resume_post: {
+ /** Run Input */
+ run_input?: Record<string, never> | null;
+ };
+ /** Body_schedule_deployment_deployments__id__schedule_post */
+ Body_schedule_deployment_deployments__id__schedule_post: {
+ /**
+ * Start Time
+ * Format: date-time
+ * @description The earliest date to schedule
+ */
+ start_time?: string;
+ /**
+ * End Time
+ * Format: date-time
+ * @description The latest date to schedule
+ */
+ end_time?: string;
+ /**
+ * Min Time
+ * Format: time-delta
+ * @description Runs will be scheduled until at least this long after the `start_time`
+ */
+ min_time?: number;
+ /**
+ * Min Runs
+ * @description The minimum number of runs to schedule
+ */
+ min_runs?: number;
+ /**
+ * Max Runs
+ * @description The maximum number of runs to schedule
+ */
+ max_runs?: number;
+ };
+ /** Body_set_flow_run_state_flow_runs__id__set_state_post */
+ Body_set_flow_run_state_flow_runs__id__set_state_post: {
+ /** @description The intended state. */
+ state: components["schemas"]["StateCreate"];
+ /**
+ * Force
+ * @description If `false`, orchestration rules will be applied and may alter or prevent the state transition. If `true`, orchestration rules are not applied.
+ * @default false
+ */
+ force: boolean;
+ };
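+ /**
+ * Illustrative sketch (not generated output): forcing a flow run into a
+ * Cancelled state, bypassing orchestration rules. This assumes
+ * `StateCreate` (defined elsewhere in this file) accepts a bare `type`.
+ * @example
+ * const body: components["schemas"]["Body_set_flow_run_state_flow_runs__id__set_state_post"] = {
+ *   state: { type: "CANCELLED" },
+ *   force: true,
+ * };
+ */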
+ /** Body_set_task_run_state_task_runs__id__set_state_post */
+ Body_set_task_run_state_task_runs__id__set_state_post: {
+ /** @description The intended state. */
+ state: components["schemas"]["StateCreate"];
+ /**
+ * Force
+ * @description If `false`, orchestration rules will be applied and may alter or prevent the state transition. If `true`, orchestration rules are not applied.
+ * @default false
+ */
+ force: boolean;
+ };
+ /** Body_task_run_history_task_runs_history_post */
+ Body_task_run_history_task_runs_history_post: {
+ /**
+ * History Start
+ * Format: date-time
+ * @description The history's start time.
+ */
+ history_start: string;
+ /**
+ * History End
+ * Format: date-time
+ * @description The history's end time.
+ */
+ history_end: string;
+ /**
+ * History Interval
+ * Format: time-delta
+ * @description The size of each history interval, in seconds. Must be at least 1 second.
+ */
+ history_interval: number;
+ flows?: components["schemas"]["FlowFilter"];
+ flow_runs?: components["schemas"]["FlowRunFilter"];
+ task_runs?: components["schemas"]["TaskRunFilter"];
+ deployments?: components["schemas"]["DeploymentFilter"];
+ };
+ /** Body_validate_obj_ui_schemas_validate_post */
+ Body_validate_obj_ui_schemas_validate_post: {
+ /** Json Schema */
+ json_schema: Record<string, never>;
+ /** Values */
+ values: Record<string, never>;
+ };
+ /** Body_worker_heartbeat_work_pools__work_pool_name__workers_heartbeat_post */
+ Body_worker_heartbeat_work_pools__work_pool_name__workers_heartbeat_post: {
+ /**
+ * Name
+ * @description The worker process name
+ */
+ name: string;
+ /**
+ * Heartbeat Interval Seconds
+ * @description The worker's heartbeat interval in seconds
+ */
+ heartbeat_interval_seconds?: number | null;
+ };
+ /**
+ * CallWebhook
+ * @description Call a webhook when an Automation is triggered.
+ */
+ CallWebhook: {
+ /**
+ * Type
+ * @default call-webhook
+ * @constant
+ * @enum {string}
+ */
+ type: "call-webhook";
+ /**
+ * Block Document Id
+ * Format: uuid
+ * @description The identifier of the webhook block to use
+ */
+ block_document_id: string;
+ /**
+ * Payload
+ * @description An optional templatable payload to send when calling the webhook.
+ * @default
+ */
+ payload: string;
+ };
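+ /**
+ * Illustrative sketch (not generated output): a `CallWebhook` action with
+ * a templated payload. The block document ID is a placeholder, and the
+ * payload assumes the server renders `{{ flow_run.id }}` style templates.
+ * @example
+ * const action: components["schemas"]["CallWebhook"] = {
+ *   type: "call-webhook",
+ *   block_document_id: "33333333-3333-3333-3333-333333333333",
+ *   payload: '{"flow_run": "{{ flow_run.id }}"}',
+ * };
+ */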
+ /**
+ * CancelFlowRun
+ * @description Cancels a flow run associated with the trigger
+ */
+ CancelFlowRun: {
+ /**
+ * Type
+ * @default cancel-flow-run
+ * @constant
+ * @enum {string}
+ */
+ type: "cancel-flow-run";
+ };
+ /**
+ * ChangeFlowRunState
+ * @description Changes the state of a flow run associated with the trigger
+ */
+ ChangeFlowRunState: {
+ /**
+ * Type
+ * @default change-flow-run-state
+ * @constant
+ * @enum {string}
+ */
+ type: "change-flow-run-state";
+ /**
+ * Name
+ * @description The name of the state to change the flow run to
+ */
+ name?: string | null;
+ /** @description The type of the state to change the flow run to */
+ state: components["schemas"]["StateType"];
+ /**
+ * Message
+ * @description An optional message to associate with the state change
+ */
+ message?: string | null;
+ };
+ /**
+ * CompoundTrigger
+ * @description A composite trigger that requires some number of triggers to have
+ * fired within the given time period
+ */
+ "CompoundTrigger-Input": {
+ /**
+ * Type
+ * @default compound
+ * @constant
+ * @enum {string}
+ */
+ type: "compound";
+ /**
+ * Id
+ * Format: uuid
+ * @description The unique ID of this trigger
+ */
+ id?: string;
+ /** Triggers */
+ triggers: (
+ | components["schemas"]["EventTrigger"]
+ | components["schemas"]["CompoundTrigger-Input"]
+ | components["schemas"]["SequenceTrigger-Input"]
+ )[];
+ /** Within */
+ within: number | null;
+ /** Require */
+ require: number | ("any" | "all");
+ };
+ /**
+ * CompoundTrigger
+ * @description A composite trigger that requires some number of triggers to have
+ * fired within the given time period
+ */
+ "CompoundTrigger-Output": {
+ /**
+ * Type
+ * @default compound
+ * @constant
+ * @enum {string}
+ */
+ type: "compound";
+ /**
+ * Id
+ * Format: uuid
+ * @description The unique ID of this trigger
+ */
+ id?: string;
+ /** Triggers */
+ triggers: (
+ | components["schemas"]["EventTrigger"]
+ | components["schemas"]["CompoundTrigger-Output"]
+ | components["schemas"]["SequenceTrigger-Output"]
+ )[];
+ /** Within */
+ within: number | null;
+ /** Require */
+ require: number | ("any" | "all");
+ };
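+ /**
+ * Illustrative sketch (not generated output): a compound trigger that
+ * fires when any one of its child triggers fires within a five-minute
+ * window. The child values are placeholders for valid `EventTrigger`s,
+ * and `within` is assumed to be seconds, matching the time-delta format
+ * used elsewhere in this file.
+ * @example
+ * const trigger: components["schemas"]["CompoundTrigger-Input"] = {
+ *   type: "compound",
+ *   triggers: [someEventTrigger, anotherEventTrigger], // placeholders
+ *   within: 300,
+ *   require: "any",
+ * };
+ */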
+ /**
+ * ConcurrencyLimit
+ * @description An ORM representation of a concurrency limit.
+ */
+ ConcurrencyLimit: {
+ /**
+ * Id
+ * Format: uuid
+ */
+ id?: string;
+ /** Created */
+ created?: string | null;
+ /** Updated */
+ updated?: string | null;
+ /**
+ * Tag
+ * @description A tag the concurrency limit is applied to.
+ */
+ tag: string;
+ /**
+ * Concurrency Limit
+ * @description The concurrency limit.
+ */
+ concurrency_limit: number;
+ /**
+ * Active Slots
+ * @description A list of active run ids using a concurrency slot
+ */
+ active_slots?: string[];
+ };
+ /**
+ * ConcurrencyLimitCreate
+ * @description Data used by the Prefect REST API to create a concurrency limit.
+ */
+ ConcurrencyLimitCreate: {
+ /**
+ * Tag
+ * @description A tag the concurrency limit is applied to.
+ */
+ tag: string;
+ /**
+ * Concurrency Limit
+ * @description The concurrency limit.
+ */
+ concurrency_limit: number;
+ };
+ /**
+ * ConcurrencyLimitStrategy
+ * @description Enumeration of concurrency collision strategies.
+ * @enum {string}
+ */
+ ConcurrencyLimitStrategy: "ENQUEUE" | "CANCEL_NEW";
+ /**
+ * ConcurrencyLimitV2
+ * @description An ORM representation of a v2 concurrency limit.
+ */
+ ConcurrencyLimitV2: {
+ /**
+ * Id
+ * Format: uuid
+ */
+ id?: string;
+ /** Created */
+ created?: string | null;
+ /** Updated */
+ updated?: string | null;
+ /**
+ * Active
+ * @description Whether the concurrency limit is active.
+ * @default true
+ */
+ active: boolean;
+ /**
+ * Name
+ * @description The name of the concurrency limit.
+ */
+ name: string;
+ /**
+ * Limit
+ * @description The concurrency limit.
+ */
+ limit: number;
+ /**
+ * Active Slots
+ * @description The number of active slots.
+ * @default 0
+ */
+ active_slots: number;
+ /**
+ * Denied Slots
+ * @description The number of denied slots.
+ * @default 0
+ */
+ denied_slots: number;
+ /**
+ * Slot Decay Per Second
+ * @description The decay rate for active slots when used as a rate limit.
+ * @default 0
+ */
+ slot_decay_per_second: number;
+ /**
+ * Avg Slot Occupancy Seconds
+ * @description The average amount of time a slot is occupied.
+ * @default 2
+ */
+ avg_slot_occupancy_seconds: number;
+ };
+ /**
+ * ConcurrencyLimitV2Create
+ * @description Data used by the Prefect REST API to create a v2 concurrency limit.
+ */
+ ConcurrencyLimitV2Create: {
+ /**
+ * Active
+ * @description Whether the concurrency limit is active.
+ * @default true
+ */
+ active: boolean;
+ /**
+ * Name
+ * @description The name of the concurrency limit.
+ */
+ name: string;
+ /**
+ * Limit
+ * @description The concurrency limit.
+ */
+ limit: number;
+ /**
+ * Active Slots
+ * @description The number of active slots.
+ * @default 0
+ */
+ active_slots: number;
+ /**
+ * Denied Slots
+ * @description The number of denied slots.
+ * @default 0
+ */
+ denied_slots: number;
+ /**
+ * Slot Decay Per Second
+ * @description The decay rate for active slots when used as a rate limit.
+ * @default 0
+ */
+ slot_decay_per_second: number;
+ };
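+ /*
+ * A sketch of a v2 limit used as a rate limit: with a positive
+ * slot_decay_per_second, occupied slots are released over time, so this
+ * allows roughly one new slot occupancy per second with a burst of 10.
+ * The name is a hypothetical placeholder.
+ *
+ * const rateLimit: components["schemas"]["ConcurrencyLimitV2Create"] = {
+ *   active: true,
+ *   name: "outbound-api-calls",
+ *   limit: 10,
+ *   active_slots: 0,
+ *   denied_slots: 0,
+ *   slot_decay_per_second: 1.0,
+ * };
+ */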
+ /**
+ * ConcurrencyLimitV2Update
+ * @description Data used by the Prefect REST API to update a v2 concurrency limit.
+ */
+ ConcurrencyLimitV2Update: {
+ /** Active */
+ active?: boolean | null;
+ /** Name */
+ name?: string | null;
+ /** Limit */
+ limit?: number | null;
+ /** Active Slots */
+ active_slots?: number | null;
+ /** Denied Slots */
+ denied_slots?: number | null;
+ /** Slot Decay Per Second */
+ slot_decay_per_second?: number | null;
+ };
+ /**
+ * ConcurrencyOptions
+ * @description Class for storing the concurrency config in database.
+ */
+ ConcurrencyOptions: {
+ collision_strategy: components["schemas"]["ConcurrencyLimitStrategy"];
+ };
+ /**
+ * Constant
+ * @description Represents constant input value to a task run.
+ */
+ Constant: {
+ /**
+ * Input Type
+ * @default constant
+ * @constant
+ * @enum {string}
+ */
+ input_type: "constant";
+ /** Type */
+ type: string;
+ };
+ /** CountByState */
+ CountByState: {
+ /**
+ * Completed
+ * @default 0
+ */
+ COMPLETED: number;
+ /**
+ * Pending
+ * @default 0
+ */
+ PENDING: number;
+ /**
+ * Running
+ * @default 0
+ */
+ RUNNING: number;
+ /**
+ * Failed
+ * @default 0
+ */
+ FAILED: number;
+ /**
+ * Cancelled
+ * @default 0
+ */
+ CANCELLED: number;
+ /**
+ * Crashed
+ * @default 0
+ */
+ CRASHED: number;
+ /**
+ * Paused
+ * @default 0
+ */
+ PAUSED: number;
+ /**
+ * Cancelling
+ * @default 0
+ */
+ CANCELLING: number;
+ /**
+ * Scheduled
+ * @default 0
+ */
+ SCHEDULED: number;
+ };
+ /**
+ * Countable
+ * @enum {string}
+ */
+ Countable: "day" | "time" | "event" | "resource";
+ /** CreatedBy */
+ CreatedBy: {
+ /**
+ * Id
+ * @description The id of the creator of the object.
+ */
+ id?: string | null;
+ /**
+ * Type
+ * @description The type of the creator of the object.
+ */
+ type?: string | null;
+ /**
+ * Display Value
+ * @description The display value for the creator.
+ */
+ display_value?: string | null;
+ };
+ /**
+ * CronSchedule
+ * @description Cron schedule
+ *
+ * NOTE: If the timezone is a DST-observing one, then the schedule will adjust
+ * itself appropriately. Cron's rules for DST are based on schedule times, not
+ * intervals. This means that an hourly cron schedule will fire on every new
+ * schedule hour, not every elapsed hour; for example, when clocks are set back
+ * this will result in a two-hour pause as the schedule will fire *the first
+ * time* 1am is reached and *the first time* 2am is reached, 120 minutes later.
+ * Longer schedules, such as one that fires at 9am every morning, will
+ * automatically adjust for DST.
+ *
+ * Args:
+ * cron (str): a valid cron string
+ * timezone (str): a valid timezone string in IANA tzdata format (for example,
+ * America/New_York).
+ * day_or (bool, optional): Control how croniter handles `day` and `day_of_week`
+ * entries. Defaults to True, matching cron which connects those values using
+ * OR. If the switch is set to False, the values are connected using AND. This
+ * behaves like fcron and enables you to, for example, define a job that
+ * executes on the 2nd Friday of a month by setting the day of month and the weekday.
+ */
+ CronSchedule: {
+ /** Cron */
+ cron: string;
+ /** Timezone */
+ timezone?: string | null;
+ /**
+ * Day Or
+ * @description Control croniter behavior for handling day and day_of_week entries.
+ * @default true
+ */
+ day_or: boolean;
+ };
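+ /*
+ * A sketch of a schedule that fires at 9am in America/New_York; per the DST
+ * note above, it tracks the local clock rather than elapsed hours.
+ *
+ * const nineAmDaily: components["schemas"]["CronSchedule"] = {
+ *   cron: "0 9 * * *",
+ *   timezone: "America/New_York",
+ *   day_or: true,
+ * };
+ */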
+ /** CsrfToken */
+ CsrfToken: {
+ /**
+ * Id
+ * Format: uuid
+ */
+ id?: string;
+ /** Created */
+ created?: string | null;
+ /** Updated */
+ updated?: string | null;
+ /**
+ * Token
+ * @description The CSRF token
+ */
+ token: string;
+ /**
+ * Client
+ * @description The client id associated with the CSRF token
+ */
+ client: string;
+ /**
+ * Expiration
+ * Format: date-time
+ * @description The expiration time of the CSRF token
+ */
+ expiration: string;
+ };
+ /** DependencyResult */
+ DependencyResult: {
+ /**
+ * Id
+ * Format: uuid
+ */
+ id: string;
+ /** Name */
+ name: string;
+ /** Upstream Dependencies */
+ upstream_dependencies: components["schemas"]["TaskRunResult"][];
+ state: components["schemas"]["State"] | null;
+ /** Expected Start Time */
+ expected_start_time: string | null;
+ /** Start Time */
+ start_time: string | null;
+ /** End Time */
+ end_time: string | null;
+ /** Total Run Time */
+ total_run_time: number | null;
+ /** Estimated Run Time */
+ estimated_run_time: number | null;
+ /** Untrackable Result */
+ untrackable_result: boolean;
+ };
+ /**
+ * DeploymentCreate
+ * @description Data used by the Prefect REST API to create a deployment.
+ */
+ DeploymentCreate: {
+ /**
+ * Name
+ * @description The name of the deployment.
+ */
+ name: string;
+ /**
+ * Flow Id
+ * Format: uuid
+ * @description The ID of the flow associated with the deployment.
+ */
+ flow_id: string;
+ /**
+ * Paused
+ * @description Whether or not the deployment is paused.
+ * @default false
+ */
+ paused: boolean;
+ /**
+ * Schedules
+ * @description A list of schedules for the deployment.
+ */
+ schedules?: components["schemas"]["DeploymentScheduleCreate"][];
+ /**
+ * Concurrency Limit
+ * @description The deployment's concurrency limit.
+ */
+ concurrency_limit?: number | null;
+ /** @description The deployment's concurrency options. */
+ concurrency_options?: components["schemas"]["ConcurrencyOptions"] | null;
+ /**
+ * Enforce Parameter Schema
+ * @description Whether or not the deployment should enforce the parameter schema.
+ * @default true
+ */
+ enforce_parameter_schema: boolean;
+ /**
+ * Parameter Openapi Schema
+ * @description The parameter schema of the flow, including defaults.
+ */
+ parameter_openapi_schema?: Record<string, unknown> | null;
+ /**
+ * Parameters
+ * @description Parameters for flow runs scheduled by the deployment.
+ */
+ parameters?: Record<string, unknown>;
+ /**
+ * Tags
+ * @description A list of deployment tags.
+ */
+ tags?: string[];
+ /** Pull Steps */
+ pull_steps?: Record<string, unknown>[] | null;
+ /** Work Queue Name */
+ work_queue_name?: string | null;
+ /**
+ * Work Pool Name
+ * @description The name of the deployment's work pool.
+ */
+ work_pool_name?: string | null;
+ /** Storage Document Id */
+ storage_document_id?: string | null;
+ /** Infrastructure Document Id */
+ infrastructure_document_id?: string | null;
+ /** Description */
+ description?: string | null;
+ /** Path */
+ path?: string | null;
+ /** Version */
+ version?: string | null;
+ /** Entrypoint */
+ entrypoint?: string | null;
+ /**
+ * Job Variables
+ * @description Overrides for the flow's infrastructure configuration.
+ */
+ job_variables?: Record<string, unknown>;
+ };
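+ /*
+ * A sketch of a minimal create payload; the flow id is a placeholder UUID
+ * and the nested schedule follows DeploymentScheduleCreate below.
+ *
+ * const deployment: components["schemas"]["DeploymentCreate"] = {
+ *   name: "etl-nightly",
+ *   flow_id: "00000000-0000-0000-0000-000000000000",
+ *   paused: false,
+ *   enforce_parameter_schema: true,
+ *   schedules: [
+ *     { active: true, schedule: { cron: "0 2 * * *", day_or: true } },
+ *   ],
+ *   tags: ["etl"],
+ * };
+ */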
+ /**
+ * DeploymentFilter
+ * @description Filter for deployments. Only deployments matching all criteria will be returned.
+ */
+ DeploymentFilter: {
+ /**
+ * @description Operator for combining filter criteria. Defaults to 'and_'.
+ * @default and_
+ */
+ operator: components["schemas"]["Operator"];
+ /** @description Filter criteria for `Deployment.id` */
+ id?: components["schemas"]["DeploymentFilterId"] | null;
+ /** @description Filter criteria for `Deployment.name` */
+ name?: components["schemas"]["DeploymentFilterName"] | null;
+ /** @description Filter criteria for `Deployment.name` or `Flow.name` */
+ flow_or_deployment_name?:
+ | components["schemas"]["DeploymentOrFlowNameFilter"]
+ | null;
+ /** @description Filter criteria for `Deployment.paused` */
+ paused?: components["schemas"]["DeploymentFilterPaused"] | null;
+ /** @description Filter criteria for `Deployment.tags` */
+ tags?: components["schemas"]["DeploymentFilterTags"] | null;
+ /** @description Filter criteria for `Deployment.work_queue_name` */
+ work_queue_name?:
+ | components["schemas"]["DeploymentFilterWorkQueueName"]
+ | null;
+ /**
+ * @deprecated
+ * @description DEPRECATED: Prefer `Deployment.concurrency_limit_id` over `Deployment.concurrency_limit`. If provided, will be ignored for backwards-compatibility. Will be removed after December 2024.
+ */
+ concurrency_limit?:
+ | components["schemas"]["DeploymentFilterConcurrencyLimit"]
+ | null;
+ };
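+ /*
+ * A sketch of a filter for paused deployments whose names contain "etl",
+ * assuming the Operator schema (defined elsewhere in this file) admits the
+ * default "and_" value.
+ *
+ * const pausedEtl: components["schemas"]["DeploymentFilter"] = {
+ *   operator: "and_",
+ *   name: { like_: "etl" },
+ *   paused: { eq_: true },
+ * };
+ */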
+ /**
+ * DeploymentFilterConcurrencyLimit
+ * @description DEPRECATED: Prefer `Deployment.concurrency_limit_id` over `Deployment.concurrency_limit`.
+ */
+ DeploymentFilterConcurrencyLimit: {
+ /**
+ * Ge
+ * @description Only include deployments with a concurrency limit greater than or equal to this value
+ */
+ ge_?: number | null;
+ /**
+ * Le
+ * @description Only include deployments with a concurrency limit less than or equal to this value
+ */
+ le_?: number | null;
+ /**
+ * Is Null
+ * @description If true, only include deployments without a concurrency limit
+ */
+ is_null_?: boolean | null;
+ };
+ /**
+ * DeploymentFilterId
+ * @description Filter by `Deployment.id`.
+ */
+ DeploymentFilterId: {
+ /**
+ * Any
+ * @description A list of deployment ids to include
+ */
+ any_?: string[] | null;
+ };
+ /**
+ * DeploymentFilterName
+ * @description Filter by `Deployment.name`.
+ */
+ DeploymentFilterName: {
+ /**
+ * Any
+ * @description A list of deployment names to include
+ */
+ any_?: string[] | null;
+ /**
+ * Like
+ * @description A case-insensitive partial match. For example, passing 'marvin' will match 'marvin', 'sad-Marvin', and 'marvin-robot'.
+ */
+ like_?: string | null;
+ };
+ /**
+ * DeploymentFilterPaused
+ * @description Filter by `Deployment.paused`.
+ */
+ DeploymentFilterPaused: {
+ /**
+ * Eq
+ * @description Only include deployments that are/are not paused
+ */
+ eq_?: boolean | null;
+ };
+ /**
+ * DeploymentFilterTags
+ * @description Filter by `Deployment.tags`.
+ */
+ DeploymentFilterTags: {
+ /**
+ * @description Operator for combining filter criteria. Defaults to 'and_'.
+ * @default and_
+ */
+ operator: components["schemas"]["Operator"];
+ /**
+ * All
+ * @description A list of tags. Deployments will be returned only if their tags are a superset of the list
+ */
+ all_?: string[] | null;
+ /**
+ * Is Null
+ * @description If true, only include deployments without tags
+ */
+ is_null_?: boolean | null;
+ };
+ /**
+ * DeploymentFilterWorkQueueName
+ * @description Filter by `Deployment.work_queue_name`.
+ */
+ DeploymentFilterWorkQueueName: {
+ /**
+ * Any
+ * @description A list of work queue names to include
+ */
+ any_?: string[] | null;
+ };
+ /**
+ * DeploymentFlowRunCreate
+ * @description Data used by the Prefect REST API to create a flow run from a deployment.
+ */
+ DeploymentFlowRunCreate: {
+ /** @description The state of the flow run to create */
+ state?: components["schemas"]["StateCreate"] | null;
+ /**
+ * Name
+ * @description The name of the flow run. Defaults to a random slug if not specified.
+ */
+ name?: string;
+ /** Parameters */
+ parameters?: Record<string, unknown>;
+ /**
+ * Enforce Parameter Schema
+ * @description Whether or not to enforce the parameter schema on this run.
+ */
+ enforce_parameter_schema?: boolean | null;
+ /** Context */
+ context?: Record<string, unknown>;
+ /** Infrastructure Document Id */
+ infrastructure_document_id?: string | null;
+ /** @description The empirical policy for the flow run. */
+ empirical_policy?: components["schemas"]["FlowRunPolicy"];
+ /**
+ * Tags
+ * @description A list of tags for the flow run.
+ */
+ tags?: string[];
+ /**
+ * Idempotency Key
+ * @description An optional idempotency key. If a flow run with the same idempotency key has already been created, the existing flow run will be returned.
+ */
+ idempotency_key?: string | null;
+ /** Parent Task Run Id */
+ parent_task_run_id?: string | null;
+ /** Work Queue Name */
+ work_queue_name?: string | null;
+ /** Job Variables */
+ job_variables?: Record<string, unknown> | null;
+ };
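+ /*
+ * A sketch of a run-from-deployment payload: the idempotency key makes a
+ * repeated submission return the existing run instead of creating a new
+ * one. The run name and parameter names are hypothetical.
+ *
+ * const runRequest: components["schemas"]["DeploymentFlowRunCreate"] = {
+ *   name: "backfill-2024-01-01",
+ *   parameters: { day: "2024-01-01" },
+ *   idempotency_key: "backfill-2024-01-01",
+ *   tags: ["backfill"],
+ * };
+ */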
+ /**
+ * DeploymentOrFlowNameFilter
+ * @description Filter by `Deployment.name` or `Flow.name` with a single input string for ilike filtering.
+ */
+ DeploymentOrFlowNameFilter: {
+ /**
+ * Like
+ * @description A case-insensitive partial match on deployment or flow names. For example, passing 'example' might match deployments or flows with 'example' in their names.
+ */
+ like_?: string | null;
+ };
+ /** DeploymentPaginationResponse */
+ DeploymentPaginationResponse: {
+ /** Results */
+ results: components["schemas"]["DeploymentResponse"][];
+ /** Count */
+ count: number;
+ /** Limit */
+ limit: number;
+ /** Pages */
+ pages: number;
+ /** Page */
+ page: number;
+ };
+ /** DeploymentResponse */
+ DeploymentResponse: {
+ /**
+ * Id
+ * Format: uuid
+ */
+ id?: string;
+ /** Created */
+ created?: string | null;
+ /** Updated */
+ updated?: string | null;
+ /**
+ * Name
+ * @description The name of the deployment.
+ */
+ name: string;
+ /**
+ * Version
+ * @description An optional version for the deployment.
+ */
+ version?: string | null;
+ /**
+ * Description
+ * @description A description for the deployment.
+ */
+ description?: string | null;
+ /**
+ * Flow Id
+ * Format: uuid
+ * @description The flow id associated with the deployment.
+ */
+ flow_id: string;
+ /**
+ * Paused
+ * @description Whether or not the deployment is paused.
+ * @default false
+ */
+ paused: boolean;
+ /**
+ * Schedules
+ * @description A list of schedules for the deployment.
+ */
+ schedules?: components["schemas"]["DeploymentSchedule"][];
+ /**
+ * Concurrency Limit
+ * @deprecated
+ * @description DEPRECATED: Prefer `global_concurrency_limit`. Will always be None for backwards compatibility. Will be removed after December 2024.
+ */
+ concurrency_limit?: number | null;
+ /** @description The global concurrency limit object for enforcing the maximum number of flow runs that can be active at once. */
+ global_concurrency_limit?:
+ | components["schemas"]["GlobalConcurrencyLimitResponse"]
+ | null;
+ /** @description The concurrency options for the deployment. */
+ concurrency_options?: components["schemas"]["ConcurrencyOptions"] | null;
+ /**
+ * Job Variables
+ * @description Overrides to apply to the base infrastructure block at runtime.
+ */
+ job_variables?: Record<string, unknown>;
+ /**
+ * Parameters
+ * @description Parameters for flow runs scheduled by the deployment.
+ */
+ parameters?: Record<string, unknown>;
+ /**
+ * Tags
+ * @description A list of tags for the deployment
+ */
+ tags?: string[];
+ /**
+ * Work Queue Name
+ * @description The work queue for the deployment. If no work queue is set, work will not be scheduled.
+ */
+ work_queue_name?: string | null;
+ /**
+ * Last Polled
+ * @description The last time the deployment was polled for status updates.
+ */
+ last_polled?: string | null;
+ /**
+ * Parameter Openapi Schema
+ * @description The parameter schema of the flow, including defaults.
+ */
+ parameter_openapi_schema?: Record<string, unknown> | null;
+ /**
+ * Path
+ * @description The path to the working directory for the workflow, relative to remote storage, or an absolute path.
+ */
+ path?: string | null;
+ /**
+ * Pull Steps
+ * @description Pull steps for cloning and running this deployment.
+ */
+ pull_steps?: Record<string, unknown>[] | null;
+ /**
+ * Entrypoint
+ * @description The path to the entrypoint for the workflow, relative to the `path`.
+ */
+ entrypoint?: string | null;
+ /**
+ * Storage Document Id
+ * @description The block document defining storage used for this flow.
+ */
+ storage_document_id?: string | null;
+ /**
+ * Infrastructure Document Id
+ * @description The block document defining infrastructure to use for flow runs.
+ */
+ infrastructure_document_id?: string | null;
+ /** @description Optional information about the creator of this deployment. */
+ created_by?: components["schemas"]["CreatedBy"] | null;
+ /** @description Optional information about the updater of this deployment. */
+ updated_by?: components["schemas"]["UpdatedBy"] | null;
+ /**
+ * Work Pool Name
+ * @description The name of the deployment's work pool.
+ */
+ work_pool_name?: string | null;
+ /**
+ * @description Whether the deployment is ready to run flows.
+ * @default NOT_READY
+ */
+ status: components["schemas"]["DeploymentStatus"] | null;
+ /**
+ * Enforce Parameter Schema
+ * @description Whether or not the deployment should enforce the parameter schema.
+ * @default true
+ */
+ enforce_parameter_schema: boolean;
+ };
+ /** DeploymentSchedule */
+ DeploymentSchedule: {
+ /**
+ * Id
+ * Format: uuid
+ */
+ id?: string;
+ /** Created */
+ created?: string | null;
+ /** Updated */
+ updated?: string | null;
+ /**
+ * Deployment Id
+ * @description The deployment id associated with this schedule.
+ */
+ deployment_id?: string | null;
+ /**
+ * Schedule
+ * @description The schedule for the deployment.
+ */
+ schedule:
+ | components["schemas"]["IntervalSchedule"]
+ | components["schemas"]["CronSchedule"]
+ | components["schemas"]["RRuleSchedule"];
+ /**
+ * Active
+ * @description Whether or not the schedule is active.
+ * @default true
+ */
+ active: boolean;
+ /**
+ * Max Scheduled Runs
+ * @description The maximum number of scheduled runs for the schedule.
+ */
+ max_scheduled_runs?: number | null;
+ };
+ /** DeploymentScheduleCreate */
+ DeploymentScheduleCreate: {
+ /**
+ * Active
+ * @description Whether or not the schedule is active.
+ * @default true
+ */
+ active: boolean;
+ /**
+ * Schedule
+ * @description The schedule for the deployment.
+ */
+ schedule:
+ | components["schemas"]["IntervalSchedule"]
+ | components["schemas"]["CronSchedule"]
+ | components["schemas"]["RRuleSchedule"];
+ /**
+ * Max Scheduled Runs
+ * @description The maximum number of scheduled runs for the schedule.
+ */
+ max_scheduled_runs?: number | null;
+ };
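+ /*
+ * A sketch of a schedule create payload that fires every 15 minutes and is
+ * capped at 100 scheduled runs.
+ *
+ * const everyFifteen: components["schemas"]["DeploymentScheduleCreate"] = {
+ *   active: true,
+ *   schedule: { cron: "0,15,30,45 * * * *", day_or: true },
+ *   max_scheduled_runs: 100,
+ * };
+ */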
+ /** DeploymentScheduleUpdate */
+ DeploymentScheduleUpdate: {
+ /**
+ * Active
+ * @description Whether or not the schedule is active.
+ */
+ active?: boolean | null;
+ /**
+ * Schedule
+ * @description The schedule for the deployment.
+ */
+ schedule?:
+ | components["schemas"]["IntervalSchedule"]
+ | components["schemas"]["CronSchedule"]
+ | components["schemas"]["RRuleSchedule"]
+ | null;
+ /**
+ * Max Scheduled Runs
+ * @description The maximum number of scheduled runs for the schedule.
+ */
+ max_scheduled_runs?: number | null;
+ };
+ /**
+ * DeploymentSort
+ * @description Defines deployment sorting options.
+ * @enum {string}
+ */
+ DeploymentSort: "CREATED_DESC" | "UPDATED_DESC" | "NAME_ASC" | "NAME_DESC";
+ /**
+ * DeploymentStatus
+ * @description Enumeration of deployment statuses.
+ * @enum {string}
+ */
+ DeploymentStatus: "READY" | "NOT_READY";
+ /**
+ * DeploymentUpdate
+ * @description Data used by the Prefect REST API to update a deployment.
+ */
+ DeploymentUpdate: {
+ /** Version */
+ version?: string | null;
+ /** Description */
+ description?: string | null;
+ /**
+ * Paused
+ * @description Whether or not the deployment is paused.
+ * @default false
+ */
+ paused: boolean;
+ /**
+ * Schedules
+ * @description A list of schedules for the deployment.
+ */
+ schedules?: components["schemas"]["DeploymentScheduleCreate"][];
+ /**
+ * Concurrency Limit
+ * @description The deployment's concurrency limit.
+ */
+ concurrency_limit?: number | null;
+ /** @description The deployment's concurrency options. */
+ concurrency_options?: components["schemas"]["ConcurrencyOptions"] | null;
+ /**
+ * Parameters
+ * @description Parameters for flow runs scheduled by the deployment.
+ */
+ parameters?: Record<string, unknown> | null;
+ /**
+ * Tags
+ * @description A list of deployment tags.
+ */
+ tags?: string[];
+ /** Work Queue Name */
+ work_queue_name?: string | null;
+ /**
+ * Work Pool Name
+ * @description The name of the deployment's work pool.
+ */
+ work_pool_name?: string | null;
+ /** Path */
+ path?: string | null;
+ /**
+ * Job Variables
+ * @description Overrides for the flow's infrastructure configuration.
+ */
+ job_variables?: Record<string, unknown> | null;
+ /** Entrypoint */
+ entrypoint?: string | null;
+ /** Storage Document Id */
+ storage_document_id?: string | null;
+ /** Infrastructure Document Id */
+ infrastructure_document_id?: string | null;
+ /**
+ * Enforce Parameter Schema
+ * @description Whether or not the deployment should enforce the parameter schema.
+ */
+ enforce_parameter_schema?: boolean | null;
+ };
+ /**
+ * DoNothing
+ * @description Do nothing when an Automation is triggered
+ */
+ DoNothing: {
+ /**
+ * Type
+ * @default do-nothing
+ * @constant
+ * @enum {string}
+ */
+ type: "do-nothing";
+ };
+ /** Edge */
+ Edge: {
+ /**
+ * Id
+ * Format: uuid
+ */
+ id: string;
+ };
+ /**
+ * Event
+ * @description The client-side view of an event that has happened to a Resource
+ */
+ Event: {
+ /**
+ * Occurred
+ * Format: date-time
+ * @description When the event happened from the sender's perspective
+ */
+ occurred: string;
+ /**
+ * Event
+ * @description The name of the event that happened
+ */
+ event: string;
+ /** @description The primary Resource this event concerns */
+ resource: components["schemas"]["Resource"];
+ /**
+ * Related
+ * @description A list of additional Resources involved in this event
+ */
+ related?: components["schemas"]["RelatedResource"][];
+ /**
+ * Payload
+ * @description An open-ended set of data describing what happened
+ */
+ payload?: Record<string, unknown>;
+ /**
+ * Id
+ * Format: uuid
+ * @description The client-provided identifier of this event
+ */
+ id: string;
+ /**
+ * Follows
+ * @description The ID of an event that is known to have occurred prior to this one. If set, this may be used to establish a more precise ordering of causally-related events when they occur close enough together in time that the system may receive them out-of-order.
+ */
+ follows?: string | null;
+ };
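+ /*
+ * A sketch of a client-submitted event, assuming Resource (defined
+ * elsewhere in this file) is a flat map of string labels keyed by
+ * `prefect.resource.id`. The event name, labels, and ids are hypothetical.
+ *
+ * const orderCreated: components["schemas"]["Event"] = {
+ *   id: "11111111-1111-1111-1111-111111111111",
+ *   occurred: new Date().toISOString(),
+ *   event: "my-app.order.created",
+ *   resource: { "prefect.resource.id": "my-app.order.12345" },
+ *   payload: { total: 42 },
+ * };
+ */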
+ /** EventAnyResourceFilter */
+ EventAnyResourceFilter: {
+ /**
+ * Id
+ * @description Only include events for resources with these IDs
+ */
+ id?: string[] | null;
+ /**
+ * Id Prefix
+ * @description Only include events for resources with IDs starting with these prefixes
+ */
+ id_prefix?: string[] | null;
+ /** @description Only include events for related resources with these labels */
+ labels?: components["schemas"]["ResourceSpecification"] | null;
+ };
+ /**
+ * EventCount
+ * @description The count of events with the given filter value
+ */
+ EventCount: {
+ /**
+ * Value
+ * @description The value to use for filtering
+ */
+ value: string;
+ /**
+ * Label
+ * @description The value to display for this count
+ */
+ label: string;
+ /**
+ * Count
+ * @description The count of matching events
+ */
+ count: number;
+ /**
+ * Start Time
+ * Format: date-time
+ * @description The start time of this group of events
+ */
+ start_time: string;
+ /**
+ * End Time
+ * Format: date-time
+ * @description The end time of this group of events
+ */
+ end_time: string;
+ };
+ /** EventFilter */
+ EventFilter: {
+ /** @description Filter criteria for when the events occurred */
+ occurred?: components["schemas"]["EventOccurredFilter"];
+ /** @description Filter criteria for the event name */
+ event?: components["schemas"]["EventNameFilter"] | null;
+ /** @description Filter criteria for any resource involved in the event */
+ any_resource?: components["schemas"]["EventAnyResourceFilter"] | null;
+ /** @description Filter criteria for the resource of the event */
+ resource?: components["schemas"]["EventResourceFilter"] | null;
+ /** @description Filter criteria for the related resources of the event */
+ related?: components["schemas"]["EventRelatedFilter"] | null;
+ /** @description Filter criteria for the events' ID */
+ id?: components["schemas"]["EventIDFilter"];
+ /**
+ * @description The order to return filtered events
+ * @default DESC
+ */
+ order: components["schemas"]["EventOrder"];
+ };
+ /** EventIDFilter */
+ EventIDFilter: {
+ /**
+ * Id
+ * @description Only include events with one of these IDs
+ */
+ id?: string[] | null;
+ };
+ /** EventNameFilter */
+ EventNameFilter: {
+ /**
+ * Prefix
+ * @description Only include events matching one of these prefixes
+ */
+ prefix?: string[] | null;
+ /**
+ * Exclude Prefix
+ * @description Exclude events matching one of these prefixes
+ */
+ exclude_prefix?: string[] | null;
+ /**
+ * Name
+ * @description Only include events matching one of these names exactly
+ */
+ name?: string[] | null;
+ /**
+ * Exclude Name
+ * @description Exclude events matching one of these names exactly
+ */
+ exclude_name?: string[] | null;
+ };
+ /** EventOccurredFilter */
+ EventOccurredFilter: {
+ /**
+ * Since
+ * Format: date-time
+ * @description Only include events after this time (inclusive)
+ */
+ since?: string;
+ /**
+ * Until
+ * Format: date-time
+ * @description Only include events prior to this time (inclusive)
+ */
+ until?: string;
+ };
+ /**
+ * EventOrder
+ * @enum {string}
+ */
+ EventOrder: "ASC" | "DESC";
+ /**
+ * EventPage
+ * @description A single page of events returned from the API, with an optional link to the
+ * next page of results
+ */
+ EventPage: {
+ /**
+ * Events
+ * @description The Events matching the query
+ */
+ events: components["schemas"]["ReceivedEvent"][];
+ /**
+ * Total
+ * @description The total number of matching Events
+ */
+ total: number;
+ /**
+ * Next Page
+ * @description The URL for the next page of results, if there are more
+ */
+ next_page: string | null;
+ };
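+ /*
+ * A sketch of draining every page by following next_page until it is null;
+ * assumes next_page is a fully qualified URL and that any auth headers are
+ * handled elsewhere.
+ *
+ * async function* allEvents(page: components["schemas"]["EventPage"]) {
+ *   while (true) {
+ *     yield* page.events;
+ *     if (!page.next_page) return;
+ *     const res = await fetch(page.next_page);
+ *     page = (await res.json()) as components["schemas"]["EventPage"];
+ *   }
+ * }
+ */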
+ /** EventRelatedFilter */
+ EventRelatedFilter: {
+ /**
+ * Id
+ * @description Only include events for related resources with these IDs
+ */
+ id?: string[] | null;
+ /**
+ * Role
+ * @description Only include events for related resources in these roles
+ */
+ role?: string[] | null;
+ /**
+ * Resources In Roles
+ * @description Only include events with specific related resources in specific roles
+ */
+ resources_in_roles?: [string, string][] | null;
+ /** @description Only include events for related resources with these labels */
+ labels?: components["schemas"]["ResourceSpecification"] | null;
+ };
+ /** EventResourceFilter */
+ EventResourceFilter: {
+ /**
+ * Id
+ * @description Only include events for resources with these IDs
+ */
+ id?: string[] | null;
+ /**
+ * Id Prefix
+ * @description Only include events for resources with IDs starting with these prefixes.
+ */
+ id_prefix?: string[] | null;
+ /** @description Only include events for resources with these labels */
+ labels?: components["schemas"]["ResourceSpecification"] | null;
+ /**
+ * Distinct
+ * @description Only include events for distinct resources
+ * @default false
+ */
+ distinct: boolean;
+ };
+ /**
+ * EventTrigger
+ * @description A trigger that fires based on the presence or absence of events within a given
+ * period of time.
+ */
+ EventTrigger: {
+ /**
+ * Type
+ * @default event
+ * @constant
+ * @enum {string}
+ */
+ type: "event";
+ /**
+ * Id
+ * Format: uuid
+ * @description The unique ID of this trigger
+ */
+ id?: string;
+ /** @description Labels for resources which this trigger will match. */
+ match?: components["schemas"]["ResourceSpecification"];
+ /** @description Labels for related resources which this trigger will match. */
+ match_related?: components["schemas"]["ResourceSpecification"];
+ /**
+ * After
+ * @description The event(s) which must first be seen to fire this trigger. If empty, then fire this trigger immediately. Events may include trailing wildcards, like `prefect.flow-run.*`
+ */
+ after?: string[];
+ /**
+ * Expect
+ * @description The event(s) this trigger is expecting to see. If empty, this trigger will match any event. Events may include trailing wildcards, like `prefect.flow-run.*`
+ */
+ expect?: string[];
+ /**
+ * For Each
+ * @description Evaluate the trigger separately for each distinct value of these labels on the resource. By default, labels refer to the primary resource of the triggering event. You may also refer to labels from related resources by specifying `related:<role>:<label>`. This will use the value of that label for the first related resource in that role. For example, `"for_each": ["related:flow:prefect.resource.id"]` would evaluate the trigger for each flow.
+ */
+ for_each?: string[];
+ /**
+ * Posture
+ * @description The posture of this trigger, either Reactive or Proactive. Reactive triggers respond to the _presence_ of the expected events, while Proactive triggers respond to the _absence_ of those expected events.
+ * @enum {string}
+ */
+ posture: "Reactive" | "Proactive";
+ /**
+ * Threshold
+ * @description The number of events required for this trigger to fire (for Reactive triggers), or the number of events expected (for Proactive triggers)
+ * @default 1
+ */
+ threshold: number;
+ /**
+ * Within
+ * @description The time period over which the events must occur. For Reactive triggers, this may be as low as 0 seconds, but must be at least 10 seconds for Proactive triggers
+ * @default 0
+ */
+ within: number;
+ };
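+ /*
+ * A sketch of a Proactive trigger that fires when no heartbeat event is
+ * seen for five minutes, evaluated separately per resource id as described
+ * in for_each above. The heartbeat event name is hypothetical.
+ *
+ * const missedHeartbeat: components["schemas"]["EventTrigger"] = {
+ *   type: "event",
+ *   posture: "Proactive",
+ *   expect: ["my-app.heartbeat"],
+ *   for_each: ["prefect.resource.id"],
+ *   threshold: 1,
+ *   within: 300,
+ * };
+ */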
+ /**
+ * Flow
+ * @description An ORM representation of flow data.
+ */
+ Flow: {
+ /**
+ * Id
+ * Format: uuid
+ */
+ id?: string;
+ /** Created */
+ created?: string | null;
+ /** Updated */
+ updated?: string | null;
+ /**
+ * Name
+ * @description The name of the flow
+ */
+ name: string;
+ /**
+ * Tags
+ * @description A list of flow tags
+ */
+ tags?: string[];
+ };
+ /**
+ * FlowCreate
+ * @description Data used by the Prefect REST API to create a flow.
+ */
+ FlowCreate: {
+ /**
+ * Name
+ * @description The name of the flow
+ */
+ name: string;
+ /**
+ * Tags
+ * @description A list of flow tags
+ */
+ tags?: string[];
+ };
+ /**
+ * FlowFilter
+ * @description Filter for flows. Only flows matching all criteria will be returned.
+ */
+ FlowFilter: {
+ /**
+ * @description Operator for combining filter criteria. Defaults to 'and_'.
+ * @default and_
+ */
+ operator: components["schemas"]["Operator"];
+ /** @description Filter criteria for `Flow.id` */
+ id?: components["schemas"]["FlowFilterId"] | null;
+ /** @description Filter criteria for Flow deployments */
+ deployment?: components["schemas"]["FlowFilterDeployment"] | null;
+ /** @description Filter criteria for `Flow.name` */
+ name?: components["schemas"]["FlowFilterName"] | null;
+ /** @description Filter criteria for `Flow.tags` */
+ tags?: components["schemas"]["FlowFilterTags"] | null;
+ };
+ /**
+ * FlowFilterDeployment
+ * @description Filter flows by deployment
+ */
+ FlowFilterDeployment: {
+ /**
+ * @description Operator for combining filter criteria. Defaults to 'and_'.
+ * @default and_
+ */
+ operator: components["schemas"]["Operator"];
+ /**
+ * Is Null
+ * @description If true, only include flows without deployments
+ */
+ is_null_?: boolean | null;
+ };
+ /**
+ * FlowFilterId
+ * @description Filter by `Flow.id`.
+ */
+ FlowFilterId: {
+ /**
+ * Any
+ * @description A list of flow ids to include
+ */
+ any_?: string[] | null;
+ };
+ /**
+ * FlowFilterName
+ * @description Filter by `Flow.name`.
+ */
+ FlowFilterName: {
+ /**
+ * Any
+ * @description A list of flow names to include
+ */
+ any_?: string[] | null;
+ /**
+ * Like
+ * @description A case-insensitive partial match. For example, passing 'marvin' will match 'marvin', 'sad-Marvin', and 'marvin-robot'.
+ */
+ like_?: string | null;
+ };
+ /**
+ * FlowFilterTags
+ * @description Filter by `Flow.tags`.
+ */
+ FlowFilterTags: {
+ /**
+ * @description Operator for combining filter criteria. Defaults to 'and_'.
+ * @default and_
+ */
+ operator: components["schemas"]["Operator"];
+ /**
+ * All
+ * @description A list of tags. Flows will be returned only if their tags are a superset of the list
+ */
+ all_?: string[] | null;
+ /**
+ * Is Null
+ * @description If true, only include flows without tags
+ */
+ is_null_?: boolean | null;
+ };
+ /** FlowPaginationResponse */
+ FlowPaginationResponse: {
+ /** Results */
+ results: components["schemas"]["Flow"][];
+ /** Count */
+ count: number;
+ /** Limit */
+ limit: number;
+ /** Pages */
+ pages: number;
+ /** Page */
+ page: number;
+ };
+ /**
+ * FlowRun
+ * @description An ORM representation of flow run data.
+ */
+ FlowRun: {
+ /**
+ * Id
+ * Format: uuid
+ */
+ id?: string;
+ /** Created */
+ created?: string | null;
+ /** Updated */
+ updated?: string | null;
+ /**
+ * Name
+ * @description The name of the flow run. Defaults to a random slug if not specified.
+ */
+ name?: string;
+ /**
+ * Flow Id
+ * Format: uuid
+ * @description The id of the flow being run.
+ */
+ flow_id: string;
+ /**
+ * State Id
+ * @description The id of the flow run's current state.
+ */
+ state_id?: string | null;
+ /**
+ * Deployment Id
+ * @description The id of the deployment associated with this flow run, if available.
+ */
+ deployment_id?: string | null;
+ /**
+ * Deployment Version
+ * @description The version of the deployment associated with this flow run.
+ */
+ deployment_version?: string | null;
+ /**
+ * Work Queue Name
+ * @description The work queue that handled this flow run.
+ */
+ work_queue_name?: string | null;
+ /**
+ * Flow Version
+ * @description The version of the flow executed in this flow run.
+ */
+ flow_version?: string | null;
+ /**
+ * Parameters
+ * @description Parameters for the flow run.
+ */
+ parameters?: Record<string, unknown>;
+ /**
+ * Idempotency Key
+ * @description An optional idempotency key for the flow run. Used to ensure the same flow run is not created multiple times.
+ */
+ idempotency_key?: string | null;
+ /**
+ * Context
+ * @description Additional context for the flow run.
+ */
+ context?: Record<string, unknown>;
+ empirical_policy?: components["schemas"]["FlowRunPolicy"];
+ /**
+ * Tags
+ * @description A list of tags on the flow run
+ */
+ tags?: string[];
+ /**
+ * Parent Task Run Id
+ * @description If the flow run is a subflow, the id of the 'dummy' task in the parent flow used to track subflow state.
+ */
+ parent_task_run_id?: string | null;
+ /** @description The type of the current flow run state. */
+ state_type?: components["schemas"]["StateType"] | null;
+ /**
+ * State Name
+ * @description The name of the current flow run state.
+ */
+ state_name?: string | null;
+ /**
+ * Run Count
+ * @description The number of times the flow run was executed.
+ * @default 0
+ */
+ run_count: number;
+ /**
+ * Expected Start Time
+ * @description The flow run's expected start time.
+ */
+ expected_start_time?: string | null;
+ /**
+ * Next Scheduled Start Time
+ * @description The next time the flow run is scheduled to start.
+ */
+ next_scheduled_start_time?: string | null;
+ /**
+ * Start Time
+ * @description The actual start time.
+ */
+ start_time?: string | null;
+ /**
+ * End Time
+ * @description The actual end time.
+ */
+ end_time?: string | null;
+ /**
+ * Total Run Time
+ * @description Total run time. If the flow run was executed multiple times, the time of each run will be summed.
+ * @default 0
+ */
+ total_run_time: number;
+ /**
+ * Estimated Run Time
+ * @description A real-time estimate of the total run time.
+ * @default 0
+ */
+ estimated_run_time: number;
+ /**
+ * Estimated Start Time Delta
+ * @description The difference between actual and expected start time.
+ * @default 0
+ */
+ estimated_start_time_delta: number;
+ /**
+ * Auto Scheduled
+ * @description Whether or not the flow run was automatically scheduled.
+ * @default false
+ */
+ auto_scheduled: boolean;
+ /**
+ * Infrastructure Document Id
+ * @description The block document defining infrastructure to use for this flow run.
+ */
+ infrastructure_document_id?: string | null;
+ /**
+ * Infrastructure Pid
+ * @description The id of the flow run as returned by an infrastructure block.
+ */
+ infrastructure_pid?: string | null;
+ /** @description Optional information about the creator of this flow run. */
+ created_by?: components["schemas"]["CreatedBy"] | null;
+ /**
+ * Work Queue Id
+ * @description The id of the run's work pool queue.
+ */
+ work_queue_id?: string | null;
+ /** @description The current state of the flow run. */
+ state?: components["schemas"]["State"] | null;
+ /**
+ * Job Variables
+ * @description Variables used as overrides in the base job template
+ */
+ job_variables?: Record<string, unknown> | null;
+ };
+ /**
+ * FlowRunCreate
+ * @description Data used by the Prefect REST API to create a flow run.
+ */
+ FlowRunCreate: {
+ /** @description The state of the flow run to create */
+ state?: components["schemas"]["StateCreate"] | null;
+ /**
+ * Name
+ * @description The name of the flow run. Defaults to a random slug if not specified.
+ */
+ name?: string;
+ /**
+ * Flow Id
+ * Format: uuid
+ * @description The id of the flow being run.
+ */
+ flow_id: string;
+ /**
+ * Flow Version
+ * @description The version of the flow being run.
+ */
+ flow_version?: string | null;
+ /** Parameters */
+ parameters?: Record<string, unknown>;
+ /**
+ * Context
+ * @description The context of the flow run.
+ */
+ context?: Record<string, unknown>;
+ /** Parent Task Run Id */
+ parent_task_run_id?: string | null;
+ /** Infrastructure Document Id */
+ infrastructure_document_id?: string | null;
+ /** @description The empirical policy for the flow run. */
+ empirical_policy?: components["schemas"]["FlowRunPolicy"];
+ /**
+ * Tags
+ * @description A list of tags for the flow run.
+ */
+ tags?: string[];
+ /**
+ * Idempotency Key
+ * @description An optional idempotency key. If a flow run with the same idempotency key has already been created, the existing flow run will be returned.
+ */
+ idempotency_key?: string | null;
+ /**
+ * Deployment Id
+ * @deprecated
+ * @description DEPRECATED: The id of the deployment associated with this flow run, if available.
+ */
+ deployment_id?: string | null;
+ };
+ /**
+ * FlowRunFilter
+ * @description Filter flow runs. Only flow runs matching all criteria will be returned.
+ */
+ FlowRunFilter: {
+ /**
+ * @description Operator for combining filter criteria. Defaults to 'and_'.
+ * @default and_
+ */
+ operator: components["schemas"]["Operator"];
+ /** @description Filter criteria for `FlowRun.id` */
+ id?: components["schemas"]["FlowRunFilterId"] | null;
+ /** @description Filter criteria for `FlowRun.name` */
+ name?: components["schemas"]["FlowRunFilterName"] | null;
+ /** @description Filter criteria for `FlowRun.tags` */
+ tags?: components["schemas"]["FlowRunFilterTags"] | null;
+ /** @description Filter criteria for `FlowRun.deployment_id` */
+ deployment_id?: components["schemas"]["FlowRunFilterDeploymentId"] | null;
+ /** @description Filter criteria for `FlowRun.work_queue_name` */
+ work_queue_name?:
+ | components["schemas"]["FlowRunFilterWorkQueueName"]
+ | null;
+ /** @description Filter criteria for `FlowRun.state` */
+ state?: components["schemas"]["FlowRunFilterState"] | null;
+ /** @description Filter criteria for `FlowRun.flow_version` */
+ flow_version?: components["schemas"]["FlowRunFilterFlowVersion"] | null;
+ /** @description Filter criteria for `FlowRun.start_time` */
+ start_time?: components["schemas"]["FlowRunFilterStartTime"] | null;
+ /** @description Filter criteria for `FlowRun.end_time` */
+ end_time?: components["schemas"]["FlowRunFilterEndTime"] | null;
+ /** @description Filter criteria for `FlowRun.expected_start_time` */
+ expected_start_time?:
+ | components["schemas"]["FlowRunFilterExpectedStartTime"]
+ | null;
+ /** @description Filter criteria for `FlowRun.next_scheduled_start_time` */
+ next_scheduled_start_time?:
+ | components["schemas"]["FlowRunFilterNextScheduledStartTime"]
+ | null;
+ /** @description Filter criteria for subflows of the given flow runs */
+ parent_flow_run_id?:
+ | components["schemas"]["FlowRunFilterParentFlowRunId"]
+ | null;
+ /** @description Filter criteria for `FlowRun.parent_task_run_id` */
+ parent_task_run_id?:
+ | components["schemas"]["FlowRunFilterParentTaskRunId"]
+ | null;
+ /** @description Filter criteria for `FlowRun.idempotency_key` */
+ idempotency_key?:
+ | components["schemas"]["FlowRunFilterIdempotencyKey"]
+ | null;
+ };
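+ /*
+ * A sketch of a filter for runs of one deployment that ended FAILED or
+ * CRASHED; the state type strings mirror the keys of CountByState above,
+ * and the deployment id is a placeholder UUID.
+ *
+ * const failedRuns: components["schemas"]["FlowRunFilter"] = {
+ *   operator: "and_",
+ *   deployment_id: {
+ *     operator: "and_",
+ *     any_: ["00000000-0000-0000-0000-000000000000"],
+ *   },
+ *   state: { operator: "and_", type: { any_: ["FAILED", "CRASHED"] } },
+ * };
+ */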
+ /**
+ * FlowRunFilterDeploymentId
+ * @description Filter by `FlowRun.deployment_id`.
+ */
+ FlowRunFilterDeploymentId: {
+ /**
+ * @description Operator for combining filter criteria. Defaults to 'and_'.
+ * @default and_
+ */
+ operator: components["schemas"]["Operator"];
+ /**
+ * Any
+ * @description A list of flow run deployment ids to include
+ */
+ any_?: string[] | null;
+ /**
+ * Is Null
+ * @description If true, only include flow runs without deployment ids
+ */
+ is_null_?: boolean | null;
+ };
+ /**
+ * FlowRunFilterEndTime
+ * @description Filter by `FlowRun.end_time`.
+ */
+ FlowRunFilterEndTime: {
+ /**
+ * Before
+ * @description Only include flow runs ending at or before this time
+ */
+ before_?: string | null;
+ /**
+ * After
+ * @description Only include flow runs ending at or after this time
+ */
+ after_?: string | null;
+ /**
+ * Is Null
+ * @description If true, only return flow runs without an end time
+ */
+ is_null_?: boolean | null;
+ };
+ /**
+ * FlowRunFilterExpectedStartTime
+ * @description Filter by `FlowRun.expected_start_time`.
+ */
+ FlowRunFilterExpectedStartTime: {
+ /**
+ * Before
+ * @description Only include flow runs scheduled to start at or before this time
+ */
+ before_?: string | null;
+ /**
+ * After
+ * @description Only include flow runs scheduled to start at or after this time
+ */
+ after_?: string | null;
+ };
+ /**
+ * FlowRunFilterFlowVersion
+ * @description Filter by `FlowRun.flow_version`.
+ */
+ FlowRunFilterFlowVersion: {
+ /**
+ * Any
+ * @description A list of flow run flow_versions to include
+ */
+ any_?: string[] | null;
+ };
+ /**
+ * FlowRunFilterId
+ * @description Filter by `FlowRun.id`.
+ */
+ FlowRunFilterId: {
+ /**
+ * Any
+ * @description A list of flow run ids to include
+ */
+ any_?: string[] | null;
+ /**
+ * Not Any
+ * @description A list of flow run ids to exclude
+ */
+ not_any_?: string[] | null;
+ };
+ /**
+ * FlowRunFilterIdempotencyKey
+ * @description Filter by `FlowRun.idempotency_key`.
+ */
+ FlowRunFilterIdempotencyKey: {
+ /**
+ * Any
+ * @description A list of flow run idempotency keys to include
+ */
+ any_?: string[] | null;
+ /**
+ * Not Any
+ * @description A list of flow run idempotency keys to exclude
+ */
+ not_any_?: string[] | null;
+ };
+ /**
+ * FlowRunFilterName
+ * @description Filter by `FlowRun.name`.
+ */
+ FlowRunFilterName: {
+ /**
+ * Any
+ * @description A list of flow run names to include
+ */
+ any_?: string[] | null;
+ /**
+ * Like
+ * @description A case-insensitive partial match. For example, passing 'marvin' will match 'marvin', 'sad-Marvin', and 'marvin-robot'.
+ */
+ like_?: string | null;
+ };
+ /**
+ * FlowRunFilterNextScheduledStartTime
+ * @description Filter by `FlowRun.next_scheduled_start_time`.
+ */
+ FlowRunFilterNextScheduledStartTime: {
+ /**
+ * Before
+ * @description Only include flow runs with a next_scheduled_start_time at or before this time
+ */
+ before_?: string | null;
+ /**
+ * After
+ * @description Only include flow runs with a next_scheduled_start_time at or after this time
+ */
+ after_?: string | null;
+ };
+ /**
+ * FlowRunFilterParentFlowRunId
+ * @description Filter for subflows of a given flow run
+ */
+ FlowRunFilterParentFlowRunId: {
+ /**
+ * @description Operator for combining filter criteria. Defaults to 'and_'.
+ * @default and_
+ */
+ operator: components["schemas"]["Operator"];
+ /**
+ * Any
+ * @description A list of parent flow run ids to include
+ */
+ any_?: string[] | null;
+ };
+ /**
+ * FlowRunFilterParentTaskRunId
+ * @description Filter by `FlowRun.parent_task_run_id`.
+ */
+ FlowRunFilterParentTaskRunId: {
+ /**
+ * @description Operator for combining filter criteria. Defaults to 'and_'.
+ * @default and_
+ */
+ operator: components["schemas"]["Operator"];
+ /**
+ * Any
+ * @description A list of flow run parent_task_run_ids to include
+ */
+ any_?: string[] | null;
+ /**
+ * Is Null
+ * @description If true, only include flow runs without parent_task_run_id
+ */
+ is_null_?: boolean | null;
+ };
+ /**
+ * FlowRunFilterStartTime
+ * @description Filter by `FlowRun.start_time`.
+ */
+ FlowRunFilterStartTime: {
+ /**
+ * Before
+ * @description Only include flow runs starting at or before this time
+ */
+ before_?: string | null;
+ /**
+ * After
+ * @description Only include flow runs starting at or after this time
+ */
+ after_?: string | null;
+ /**
+ * Is Null
+ * @description If true, only return flow runs without a start time
+ */
+ is_null_?: boolean | null;
+ };
+ /**
+ * FlowRunFilterState
+ * @description Filter by `FlowRun.state_type` and `FlowRun.state_name`.
+ */
+ FlowRunFilterState: {
+ /**
+ * @description Operator for combining filter criteria. Defaults to 'and_'.
+ * @default and_
+ */
+ operator: components["schemas"]["Operator"];
+ /** @description Filter criteria for `FlowRun.state_type` */
+ type?: components["schemas"]["FlowRunFilterStateType"] | null;
+ /** @description Filter criteria for `FlowRun.state_name` */
+ name?: components["schemas"]["FlowRunFilterStateName"] | null;
+ };
+ /**
+ * FlowRunFilterStateName
+ * @description Filter by `FlowRun.state_name`.
+ */
+ FlowRunFilterStateName: {
+ /**
+ * Any
+ * @description A list of flow run state names to include
+ */
+ any_?: string[] | null;
+ /**
+ * Not Any
+ * @description A list of flow run state names to exclude
+ */
+ not_any_?: string[] | null;
+ };
+ /**
+ * FlowRunFilterStateType
+ * @description Filter by `FlowRun.state_type`.
+ */
+ FlowRunFilterStateType: {
+ /**
+ * Any
+ * @description A list of flow run state types to include
+ */
+ any_?: components["schemas"]["StateType"][] | null;
+ /**
+ * Not Any
+ * @description A list of flow run state types to exclude
+ */
+ not_any_?: components["schemas"]["StateType"][] | null;
+ };
+ /**
+ * FlowRunFilterTags
+ * @description Filter by `FlowRun.tags`.
+ */
+ FlowRunFilterTags: {
+ /**
+ * @description Operator for combining filter criteria. Defaults to 'and_'.
+ * @default and_
+ */
+ operator: components["schemas"]["Operator"];
+ /**
+ * All
+ * @description A list of tags. Flow runs will be returned only if their tags are a superset of the list
+ */
+ all_?: string[] | null;
+ /**
+ * Is Null
+ * @description If true, only include flow runs without tags
+ */
+ is_null_?: boolean | null;
+ };
+ /**
+ * FlowRunFilterWorkQueueName
+ * @description Filter by `FlowRun.work_queue_name`.
+ */
+ FlowRunFilterWorkQueueName: {
+ /**
+ * @description Operator for combining filter criteria. Defaults to 'and_'.
+ * @default and_
+ */
+ operator: components["schemas"]["Operator"];
+ /**
+ * Any
+ * @description A list of work queue names to include
+ */
+ any_?: string[] | null;
+ /**
+ * Is Null
+ * @description If true, only include flow runs without work queue names
+ */
+ is_null_?: boolean | null;
+ };
+ /** FlowRunInput */
+ FlowRunInput: {
+ /**
+ * Id
+ * Format: uuid
+ */
+ id?: string;
+ /** Created */
+ created?: string | null;
+ /** Updated */
+ updated?: string | null;
+ /**
+ * Flow Run Id
+ * Format: uuid
+ * @description The flow run ID associated with the input.
+ */
+ flow_run_id: string;
+ /**
+ * Key
+ * @description The key of the input.
+ */
+ key: string;
+ /**
+ * Value
+ * @description The value of the input.
+ */
+ value: string;
+ /**
+ * Sender
+ * @description The sender of the input.
+ */
+ sender?: string | null;
+ };
+ /**
+ * FlowRunNotificationPolicy
+ * @description An ORM representation of a flow run notification.
+ */
+ FlowRunNotificationPolicy: {
+ /**
+ * Id
+ * Format: uuid
+ */
+ id?: string;
+ /** Created */
+ created?: string | null;
+ /** Updated */
+ updated?: string | null;
+ /**
+ * Is Active
+ * @description Whether the policy is currently active
+ * @default true
+ */
+ is_active: boolean;
+ /**
+ * State Names
+ * @description The flow run states that trigger notifications
+ */
+ state_names: string[];
+ /**
+ * Tags
+ * @description The flow run tags that trigger notifications (set [] to disable)
+ */
+ tags: string[];
+ /**
+ * Block Document Id
+ * Format: uuid
+ * @description The block document ID used for sending notifications
+ */
+ block_document_id: string;
+ /**
+ * Message Template
+ * @description A templatable notification message. Use {braces} to add variables. Valid variables include: 'flow_id', 'flow_name', 'flow_run_id', 'flow_run_name', 'flow_run_notification_policy_id', 'flow_run_parameters', 'flow_run_state_message', 'flow_run_state_name', 'flow_run_state_timestamp', 'flow_run_state_type', 'flow_run_url'
+ */
+ message_template?: string | null;
+ };
+ /**
+ * FlowRunNotificationPolicyCreate
+ * @description Data used by the Prefect REST API to create a flow run notification policy.
+ */
+ FlowRunNotificationPolicyCreate: {
+ /**
+ * Is Active
+ * @description Whether the policy is currently active
+ * @default true
+ */
+ is_active: boolean;
+ /**
+ * State Names
+ * @description The flow run states that trigger notifications
+ */
+ state_names: string[];
+ /**
+ * Tags
+ * @description The flow run tags that trigger notifications (set [] to disable)
+ */
+ tags: string[];
+ /**
+ * Block Document Id
+ * Format: uuid
+ * @description The block document ID used for sending notifications
+ */
+ block_document_id: string;
+ /**
+ * Message Template
+ * @description A templatable notification message. Use {braces} to add variables. Valid variables include: 'flow_id', 'flow_name', 'flow_run_id', 'flow_run_name', 'flow_run_notification_policy_id', 'flow_run_parameters', 'flow_run_state_message', 'flow_run_state_name', 'flow_run_state_timestamp', 'flow_run_state_type', 'flow_run_url'
+ */
+ message_template?: string | null;
+ };
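+ /*
+ * A sketch of a create payload using two of the template variables listed
+ * above; the block document id is a placeholder UUID, and the state names
+ * are assumed to match Prefect's title-cased state names.
+ *
+ * const notifyOnFailure: components["schemas"]["FlowRunNotificationPolicyCreate"] = {
+ *   is_active: true,
+ *   state_names: ["Failed", "Crashed"],
+ *   tags: [],
+ *   block_document_id: "00000000-0000-0000-0000-000000000000",
+ *   message_template: "Flow run {flow_run_name} entered {flow_run_state_name}",
+ * };
+ */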
+ /**
+ * FlowRunNotificationPolicyFilter
+ * @description Filter FlowRunNotificationPolicies.
+ */
+ FlowRunNotificationPolicyFilter: {
+ /**
+ * @description Filter criteria for `FlowRunNotificationPolicy.is_active`.
+ * @default {
+ * "eq_": false
+ * }
+ */
+ is_active:
+ | components["schemas"]["FlowRunNotificationPolicyFilterIsActive"]
+ | null;
+ };
+ /**
+ * FlowRunNotificationPolicyFilterIsActive
+ * @description Filter by `FlowRunNotificationPolicy.is_active`.
+ */
+ FlowRunNotificationPolicyFilterIsActive: {
+ /**
+ * Eq
+ * @description Filter notification policies for only those that are or are not active.
+ */
+ eq_?: boolean | null;
+ };
+ /**
+ * FlowRunNotificationPolicyUpdate
+ * @description Data used by the Prefect REST API to update a flow run notification policy.
+ */
+ FlowRunNotificationPolicyUpdate: {
+ /** Is Active */
+ is_active?: boolean | null;
+ /** State Names */
+ state_names?: string[] | null;
+ /** Tags */
+ tags?: string[] | null;
+ /** Block Document Id */
+ block_document_id?: string | null;
+ /** Message Template */
+ message_template?: string | null;
+ };
+ /** FlowRunPaginationResponse */
+ FlowRunPaginationResponse: {
+ /** Results */
+ results: components["schemas"]["FlowRunResponse"][];
+ /** Count */
+ count: number;
+ /** Limit */
+ limit: number;
+ /** Pages */
+ pages: number;
+ /** Page */
+ page: number;
+ };
+ /**
+ * FlowRunPolicy
+ * @description Defines how a flow run should retry.
+ */
+ FlowRunPolicy: {
+ /**
+ * Max Retries
+ * @deprecated
+ * @description The maximum number of retries. Field is not used. Please use `retries` instead.
+ * @default 0
+ */
+ max_retries: number;
+ /**
+ * Retry Delay Seconds
+ * @deprecated
+ * @description The delay between retries. Field is not used. Please use `retry_delay` instead.
+ * @default 0
+ */
+ retry_delay_seconds: number;
+ /**
+ * Retries
+ * @description The number of retries.
+ */
+ retries?: number | null;
+ /**
+ * Retry Delay
+ * @description The delay time between retries, in seconds.
+ */
+ retry_delay?: number | null;
+ /**
+ * Pause Keys
+ * @description Tracks pauses this run has observed.
+ */
+ pause_keys?: unknown[] | null;
+ /**
+ * Resuming
+ * @description Indicates if this run is resuming from a pause.
+ * @default false
+ */
+ resuming: boolean | null;
+ };
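+ /*
+ * A sketch of a retry policy: three retries, ten seconds apart. The
+ * deprecated max_retries and retry_delay_seconds fields are still required
+ * by this generated type, so they carry their documented defaults.
+ *
+ * const retryPolicy: components["schemas"]["FlowRunPolicy"] = {
+ *   max_retries: 0,
+ *   retry_delay_seconds: 0,
+ *   retries: 3,
+ *   retry_delay: 10,
+ *   resuming: false,
+ * };
+ */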
+ /** FlowRunResponse */
+ FlowRunResponse: {
+ /**
+ * Id
+ * Format: uuid
+ */
+ id?: string;
+ /** Created */
+ created?: string | null;
+ /** Updated */
+ updated?: string | null;
+ /**
+ * Name
+ * @description The name of the flow run. Defaults to a random slug if not specified.
+ */
+ name?: string;
+ /**
+ * Flow Id
+ * Format: uuid
+ * @description The id of the flow being run.
+ */
+ flow_id: string;
+ /**
+ * State Id
+ * @description The id of the flow run's current state.
+ */
+ state_id?: string | null;
+ /**
+ * Deployment Id
+ * @description The id of the deployment associated with this flow run, if available.
+ */
+ deployment_id?: string | null;
+ /**
+ * Deployment Version
+ * @description The version of the deployment associated with this flow run.
+ */
+ deployment_version?: string | null;
+ /**
+ * Work Queue Id
+ * @description The id of the run's work pool queue.
+ */
+ work_queue_id?: string | null;
+ /**
+ * Work Queue Name
+ * @description The work queue that handled this flow run.
+ */
+ work_queue_name?: string | null;
+ /**
+ * Flow Version
+ * @description The version of the flow executed in this flow run.
+ */
+ flow_version?: string | null;
+ /**
+ * Parameters
+ * @description Parameters for the flow run.
+ */
+ parameters?: Record<string, unknown>;
+ /**
+ * Idempotency Key
+ * @description An optional idempotency key for the flow run. Used to ensure the same flow run is not created multiple times.
+ */
+ idempotency_key?: string | null;
+ /**
+ * Context
+ * @description Additional context for the flow run.
+ */
+ context?: Record<string, unknown>;
+ empirical_policy?: components["schemas"]["FlowRunPolicy"];
+ /**
+ * Tags
+ * @description A list of tags on the flow run
+ */
+ tags?: string[];
+ /**
+ * Parent Task Run Id
+ * @description If the flow run is a subflow, the id of the 'dummy' task in the parent flow used to track subflow state.
+ */
+ parent_task_run_id?: string | null;
+ /** @description The type of the current flow run state. */
+ state_type?: components["schemas"]["StateType"] | null;
+ /**
+ * State Name
+ * @description The name of the current flow run state.
+ */
+ state_name?: string | null;
+ /**
+ * Run Count
+ * @description The number of times the flow run was executed.
+ * @default 0
+ */
+ run_count: number;
+ /**
+ * Expected Start Time
+ * @description The flow run's expected start time.
+ */
+ expected_start_time?: string | null;
+ /**
+ * Next Scheduled Start Time
+ * @description The next time the flow run is scheduled to start.
+ */
+ next_scheduled_start_time?: string | null;
+ /**
+ * Start Time
+ * @description The actual start time.
+ */
+ start_time?: string | null;
+ /**
+ * End Time
+ * @description The actual end time.
+ */
+ end_time?: string | null;
+ /**
+ * Total Run Time
+ * @description Total run time. If the flow run was executed multiple times, the time of each run will be summed.
+ * @default 0
+ */
+ total_run_time: number;
+ /**
+ * Estimated Run Time
+ * @description A real-time estimate of the total run time.
+ * @default 0
+ */
+ estimated_run_time: number;
+ /**
+ * Estimated Start Time Delta
+ * @description The difference between actual and expected start time.
+ * @default 0
+ */
+ estimated_start_time_delta: number;
+ /**
+ * Auto Scheduled
+ * @description Whether or not the flow run was automatically scheduled.
+ * @default false
+ */
+ auto_scheduled: boolean;
+ /**
+ * Infrastructure Document Id
+ * @description The block document defining infrastructure to use for this flow run.
+ */
+ infrastructure_document_id?: string | null;
+ /**
+ * Infrastructure Pid
+ * @description The id of the flow run as returned by an infrastructure block.
+ */
+ infrastructure_pid?: string | null;
+ /** @description Optional information about the creator of this flow run. */
+ created_by?: components["schemas"]["CreatedBy"] | null;
+ /**
+ * Work Pool Id
+ * @description The id of the flow run's work pool.
+ */
+ work_pool_id?: string | null;
+ /**
+ * Work Pool Name
+ * @description The name of the flow run's work pool.
+ */
+ work_pool_name?: string | null;
+ /** @description The current state of the flow run. */
+ state?: components["schemas"]["State"] | null;
+ /**
+ * Job Variables
+ * @description Variables used as overrides in the base job template
+ */
+ job_variables?: Record<string, never> | null;
+ };
+ /**
+ * FlowRunSort
+ * @description Defines flow run sorting options.
+ * @enum {string}
+ */
+ FlowRunSort:
+ | "ID_DESC"
+ | "START_TIME_ASC"
+ | "START_TIME_DESC"
+ | "EXPECTED_START_TIME_ASC"
+ | "EXPECTED_START_TIME_DESC"
+ | "NAME_ASC"
+ | "NAME_DESC"
+ | "NEXT_SCHEDULED_START_TIME_ASC"
+ | "END_TIME_DESC";
+ /**
+ * FlowRunUpdate
+ * @description Data used by the Prefect REST API to update a flow run.
+ */
+ FlowRunUpdate: {
+ /** Name */
+ name?: string | null;
+ /** Flow Version */
+ flow_version?: string | null;
+ /** Parameters */
+ parameters?: Record<string, never>;
+ empirical_policy?: components["schemas"]["FlowRunPolicy"];
+ /** Tags */
+ tags?: string[];
+ /** Infrastructure Pid */
+ infrastructure_pid?: string | null;
+ /** Job Variables */
+ job_variables?: Record<string, never> | null;
+ };
+ /**
+ * FlowSort
+ * @description Defines flow sorting options.
+ * @enum {string}
+ */
+ FlowSort: "CREATED_DESC" | "UPDATED_DESC" | "NAME_ASC" | "NAME_DESC";
+ /**
+ * FlowUpdate
+ * @description Data used by the Prefect REST API to update a flow.
+ */
+ FlowUpdate: {
+ /**
+ * Tags
+ * @description A list of flow tags
+ */
+ tags?: string[];
+ };
+ /**
+ * GlobalConcurrencyLimitResponse
+ * @description A response object for global concurrency limits.
+ */
+ GlobalConcurrencyLimitResponse: {
+ /**
+ * Id
+ * Format: uuid
+ */
+ id?: string;
+ /** Created */
+ created?: string | null;
+ /** Updated */
+ updated?: string | null;
+ /**
+ * Active
+ * @description Whether the global concurrency limit is active.
+ * @default true
+ */
+ active: boolean;
+ /**
+ * Name
+ * @description The name of the global concurrency limit.
+ */
+ name: string;
+ /**
+ * Limit
+ * @description The concurrency limit.
+ */
+ limit: number;
+ /**
+ * Active Slots
+ * @description The number of active slots.
+ */
+ active_slots: number;
+ /**
+ * Slot Decay Per Second
+ * @description The decay rate for active slots when used as a rate limit.
+ * @default 2
+ */
+ slot_decay_per_second: number;
+ };
+ /** Graph */
+ Graph: {
+ /**
+ * Start Time
+ * Format: date-time
+ */
+ start_time: string;
+ /** End Time */
+ end_time: string | null;
+ /** Root Node Ids */
+ root_node_ids: string[];
+ /** Nodes */
+ nodes: [string, components["schemas"]["Node"]][];
+ /** Artifacts */
+ artifacts: components["schemas"]["GraphArtifact"][];
+ /** States */
+ states: components["schemas"]["GraphState"][];
+ };
+ /** GraphArtifact */
+ GraphArtifact: {
+ /**
+ * Id
+ * Format: uuid
+ */
+ id: string;
+ /**
+ * Created
+ * Format: date-time
+ */
+ created: string;
+ /** Key */
+ key: string | null;
+ /** Type */
+ type: string;
+ /** Is Latest */
+ is_latest: boolean;
+ /** Data */
+ data: unknown | null;
+ };
+ /** GraphState */
+ GraphState: {
+ /**
+ * Id
+ * Format: uuid
+ */
+ id: string;
+ /**
+ * Timestamp
+ * Format: date-time
+ */
+ timestamp: string;
+ type: components["schemas"]["StateType"];
+ /** Name */
+ name: string;
+ };
+ /** HTTPValidationError */
+ HTTPValidationError: {
+ /** Detail */
+ detail?: components["schemas"]["ValidationError"][];
+ };
+ /**
+ * HistoryResponse
+ * @description Represents a history of aggregation states over an interval
+ */
+ HistoryResponse: {
+ /**
+ * Interval Start
+ * Format: date-time
+ * @description The start date of the interval.
+ */
+ interval_start: string;
+ /**
+ * Interval End
+ * Format: date-time
+ * @description The end date of the interval.
+ */
+ interval_end: string;
+ /**
+ * States
+ * @description A list of state histories during the interval.
+ */
+ states: components["schemas"]["HistoryResponseState"][];
+ };
+ /**
+ * HistoryResponseState
+ * @description Represents a single state's history over an interval.
+ */
+ HistoryResponseState: {
+ /** @description The state type. */
+ state_type: components["schemas"]["StateType"];
+ /**
+ * State Name
+ * @description The state name.
+ */
+ state_name: string;
+ /**
+ * Count Runs
+ * @description The number of runs in the specified state during the interval.
+ */
+ count_runs: number;
+ /**
+ * Sum Estimated Run Time
+ * @description The total estimated run time of all runs during the interval.
+ */
+ sum_estimated_run_time: number;
+ /**
+ * Sum Estimated Lateness
+ * @description The sum of differences between actual and expected start time during the interval.
+ */
+ sum_estimated_lateness: number;
+ };
+ /**
+ * IntervalSchedule
+ * @description A schedule formed by adding `interval` increments to an `anchor_date`. If no
+ * `anchor_date` is supplied, the current UTC time is used. If a
+ * timezone-naive datetime is provided for `anchor_date`, it is assumed to be
+ * in the schedule's timezone (or UTC). Even if supplied with an IANA timezone,
+ * anchor dates are always stored as UTC offsets, so a `timezone` can be
+ * provided to determine localization behaviors like DST boundary handling. If
+ * none is provided it will be inferred from the anchor date.
+ *
+ * NOTE: If the `IntervalSchedule` `anchor_date` or `timezone` is provided in a
+ * DST-observing timezone, then the schedule will adjust itself appropriately.
+ * Intervals greater than 24 hours will follow DST conventions, while intervals
+ * of less than 24 hours will follow UTC intervals. For example, an hourly
+ * schedule will fire every UTC hour, even across DST boundaries. When clocks
+ * are set back, this will result in two runs that *appear* to both be
+ * scheduled for 1am local time, even though they are an hour apart in UTC
+ * time. For longer intervals, like a daily schedule, the interval schedule
+ * will adjust for DST boundaries so that the clock-hour remains constant. This
+ * means that a daily schedule that always fires at 9am will observe DST and
+ * continue to fire at 9am in the local time zone.
+ *
+ * Args:
+ * interval (datetime.timedelta): an interval to schedule on.
+ * anchor_date (DateTime, optional): an anchor date to schedule increments against;
+ * if not provided, the current timestamp will be used.
+ * timezone (str, optional): a valid timezone string.
+ */
+ IntervalSchedule: {
+ /** Interval */
+ interval: number;
+ /**
+ * Anchor Date
+ * Format: date-time
+ */
+ anchor_date?: string;
+ /** Timezone */
+ timezone?: string | null;
+ };
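+ // Illustrative example (editor's sketch, not generated output): a daily
+ // schedule anchored at 9am US Eastern. The schema types `interval` as a bare
+ // number; the assumption here is that it carries the timedelta as seconds.
+ //
+ //   const daily: components["schemas"]["IntervalSchedule"] = {
+ //     interval: 86400, // one day, assuming seconds
+ //     anchor_date: "2024-01-01T09:00:00-05:00",
+ //     timezone: "America/New_York", // localizes DST handling per the note above
+ //   };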
+ /**
+ * Log
+ * @description An ORM representation of log data.
+ */
+ Log: {
+ /**
+ * Id
+ * Format: uuid
+ */
+ id?: string;
+ /** Created */
+ created?: string | null;
+ /** Updated */
+ updated?: string | null;
+ /**
+ * Name
+ * @description The logger name.
+ */
+ name: string;
+ /**
+ * Level
+ * @description The log level.
+ */
+ level: number;
+ /**
+ * Message
+ * @description The log message.
+ */
+ message: string;
+ /**
+ * Timestamp
+ * Format: date-time
+ * @description The log timestamp.
+ */
+ timestamp: string;
+ /**
+ * Flow Run Id
+ * @description The flow run ID associated with the log.
+ */
+ flow_run_id?: string | null;
+ /**
+ * Task Run Id
+ * @description The task run ID associated with the log.
+ */
+ task_run_id?: string | null;
+ };
+ /**
+ * LogCreate
+ * @description Data used by the Prefect REST API to create a log.
+ */
+ LogCreate: {
+ /**
+ * Name
+ * @description The logger name.
+ */
+ name: string;
+ /**
+ * Level
+ * @description The log level.
+ */
+ level: number;
+ /**
+ * Message
+ * @description The log message.
+ */
+ message: string;
+ /**
+ * Timestamp
+ * Format: date-time
+ * @description The log timestamp.
+ */
+ timestamp: string;
+ /** Flow Run Id */
+ flow_run_id?: string | null;
+ /** Task Run Id */
+ task_run_id?: string | null;
+ };
+ /**
+ * LogFilter
+ * @description Filter logs. Only logs matching all criteria will be returned
+ */
+ LogFilter: {
+ /**
+ * @description Operator for combining filter criteria. Defaults to 'and_'.
+ * @default and_
+ */
+ operator: components["schemas"]["Operator"];
+ /** @description Filter criteria for `Log.level` */
+ level?: components["schemas"]["LogFilterLevel"] | null;
+ /** @description Filter criteria for `Log.timestamp` */
+ timestamp?: components["schemas"]["LogFilterTimestamp"] | null;
+ /** @description Filter criteria for `Log.flow_run_id` */
+ flow_run_id?: components["schemas"]["LogFilterFlowRunId"] | null;
+ /** @description Filter criteria for `Log.task_run_id` */
+ task_run_id?: components["schemas"]["LogFilterTaskRunId"] | null;
+ };
+ /**
+ * LogFilterFlowRunId
+ * @description Filter by `Log.flow_run_id`.
+ */
+ LogFilterFlowRunId: {
+ /**
+ * Any
+ * @description A list of flow run IDs to include
+ */
+ any_?: string[] | null;
+ };
+ /**
+ * LogFilterLevel
+ * @description Filter by `Log.level`.
+ */
+ LogFilterLevel: {
+ /**
+ * Ge
+ * @description Include logs with a level greater than or equal to this level
+ */
+ ge_?: number | null;
+ /**
+ * Le
+ * @description Include logs with a level less than or equal to this level
+ */
+ le_?: number | null;
+ };
+ /**
+ * LogFilterTaskRunId
+ * @description Filter by `Log.task_run_id`.
+ */
+ LogFilterTaskRunId: {
+ /**
+ * Any
+ * @description A list of task run IDs to include
+ */
+ any_?: string[] | null;
+ /**
+ * Is Null
+ * @description If true, only include logs without a task run id
+ */
+ is_null_?: boolean | null;
+ };
+ /**
+ * LogFilterTimestamp
+ * @description Filter by `Log.timestamp`.
+ */
+ LogFilterTimestamp: {
+ /**
+ * Before
+ * @description Only include logs with a timestamp at or before this time
+ */
+ before_?: string | null;
+ /**
+ * After
+ * @description Only include logs with a timestamp at or after this time
+ */
+ after_?: string | null;
+ };
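+ // Illustrative example (editor's sketch): combining the log filters above
+ // with the default `and_` operator to fetch INFO-and-above logs (level >= 20)
+ // for a single, hypothetical flow run id:
+ //
+ //   const logFilter: components["schemas"]["LogFilter"] = {
+ //     operator: "and_",
+ //     level: { ge_: 20 },
+ //     flow_run_id: { any_: ["00000000-0000-0000-0000-000000000000"] },
+ //   };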
+ /**
+ * LogSort
+ * @description Defines log sorting options.
+ * @enum {string}
+ */
+ LogSort: "TIMESTAMP_ASC" | "TIMESTAMP_DESC";
+ /** MinimalConcurrencyLimitResponse */
+ MinimalConcurrencyLimitResponse: {
+ /**
+ * Id
+ * Format: uuid
+ */
+ id: string;
+ /** Name */
+ name: string;
+ /** Limit */
+ limit: number;
+ };
+ /** Node */
+ Node: {
+ /**
+ * Kind
+ * @enum {string}
+ */
+ kind: "flow-run" | "task-run";
+ /**
+ * Id
+ * Format: uuid
+ */
+ id: string;
+ /** Label */
+ label: string;
+ state_type: components["schemas"]["StateType"];
+ /**
+ * Start Time
+ * Format: date-time
+ */
+ start_time: string;
+ /** End Time */
+ end_time: string | null;
+ /** Parents */
+ parents: components["schemas"]["Edge"][];
+ /** Children */
+ children: components["schemas"]["Edge"][];
+ /** Encapsulating */
+ encapsulating: components["schemas"]["Edge"][];
+ /** Artifacts */
+ artifacts: components["schemas"]["GraphArtifact"][];
+ };
+ /**
+ * Operator
+ * @description Operators for combining filter criteria.
+ * @enum {string}
+ */
+ Operator: "and_" | "or_";
+ /**
+ * OrchestrationResult
+ * @description A container for the output of state orchestration.
+ */
+ OrchestrationResult: {
+ state: components["schemas"]["State"] | null;
+ status: components["schemas"]["SetStateStatus"];
+ /** Details */
+ details:
+ | components["schemas"]["StateAcceptDetails"]
+ | components["schemas"]["StateWaitDetails"]
+ | components["schemas"]["StateRejectDetails"]
+ | components["schemas"]["StateAbortDetails"];
+ };
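+ // Illustrative example (editor's sketch): because every member of the
+ // `details` union carries a distinct constant `type` tag, it narrows like a
+ // discriminated union:
+ //
+ //   function waitSeconds(r: components["schemas"]["OrchestrationResult"]) {
+ //     // delay_seconds only exists on StateWaitDetails
+ //     return r.details.type === "wait_details" ? r.details.delay_seconds : 0;
+ //   }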
+ /**
+ * Parameter
+ * @description Represents a parameter input to a task run.
+ */
+ Parameter: {
+ /**
+ * Input Type
+ * @default parameter
+ * @constant
+ * @enum {string}
+ */
+ input_type: "parameter";
+ /** Name */
+ name: string;
+ };
+ /**
+ * PauseAutomation
+ * @description Pauses an Automation
+ */
+ PauseAutomation: {
+ /**
+ * Type
+ * @default pause-automation
+ * @constant
+ * @enum {string}
+ */
+ type: "pause-automation";
+ /**
+ * Source
+ * @description Whether this Action applies to a specific selected automation (given by `automation_id`), or to an automation that is inferred from the triggering event. If the source is 'inferred', the `automation_id` may not be set. If the source is 'selected', the `automation_id` must be set.
+ * @default selected
+ * @enum {string}
+ */
+ source: "selected" | "inferred";
+ /**
+ * Automation Id
+ * @description The identifier of the automation to act on
+ */
+ automation_id?: string | null;
+ };
+ /**
+ * PauseDeployment
+ * @description Pauses the given Deployment
+ */
+ PauseDeployment: {
+ /**
+ * Type
+ * @default pause-deployment
+ * @constant
+ * @enum {string}
+ */
+ type: "pause-deployment";
+ /**
+ * Source
+ * @description Whether this Action applies to a specific selected deployment (given by `deployment_id`), or to a deployment that is inferred from the triggering event. If the source is 'inferred', the `deployment_id` may not be set. If the source is 'selected', the `deployment_id` must be set.
+ * @default selected
+ * @enum {string}
+ */
+ source: "selected" | "inferred";
+ /**
+ * Deployment Id
+ * @description The identifier of the deployment
+ */
+ deployment_id?: string | null;
+ };
+ /**
+ * PauseWorkPool
+ * @description Pauses a Work Pool
+ */
+ PauseWorkPool: {
+ /**
+ * Type
+ * @default pause-work-pool
+ * @constant
+ * @enum {string}
+ */
+ type: "pause-work-pool";
+ /**
+ * Source
+ * @description Whether this Action applies to a specific selected work pool (given by `work_pool_id`), or to a work pool that is inferred from the triggering event. If the source is 'inferred', the `work_pool_id` may not be set. If the source is 'selected', the `work_pool_id` must be set.
+ * @default selected
+ * @enum {string}
+ */
+ source: "selected" | "inferred";
+ /**
+ * Work Pool Id
+ * @description The identifier of the work pool to pause
+ */
+ work_pool_id?: string | null;
+ };
+ /**
+ * PauseWorkQueue
+ * @description Pauses a Work Queue
+ */
+ PauseWorkQueue: {
+ /**
+ * Type
+ * @default pause-work-queue
+ * @constant
+ * @enum {string}
+ */
+ type: "pause-work-queue";
+ /**
+ * Source
+ * @description Whether this Action applies to a specific selected work queue (given by `work_queue_id`), or to a work queue that is inferred from the triggering event. If the source is 'inferred', the `work_queue_id` may not be set. If the source is 'selected', the `work_queue_id` must be set.
+ * @default selected
+ * @enum {string}
+ */
+ source: "selected" | "inferred";
+ /**
+ * Work Queue Id
+ * @description The identifier of the work queue to pause
+ */
+ work_queue_id?: string | null;
+ };
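+ // Illustrative example (editor's sketch): per the `source` descriptions
+ // above, a 'selected' action must carry the target id, while an 'inferred'
+ // action must omit it:
+ //
+ //   const pause: components["schemas"]["PauseWorkQueue"] = {
+ //     type: "pause-work-queue",
+ //     source: "selected",
+ //     work_queue_id: "00000000-0000-0000-0000-000000000000", // placeholder id
+ //   };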
+ /**
+ * QueueFilter
+ * @description Filter criteria definition for a work queue.
+ */
+ QueueFilter: {
+ /**
+ * Tags
+ * @description Only include flow runs with these tags in the work queue.
+ */
+ tags?: string[] | null;
+ /**
+ * Deployment Ids
+ * @description Only include flow runs from these deployments in the work queue.
+ */
+ deployment_ids?: string[] | null;
+ };
+ /**
+ * RRuleSchedule
+ * @description RRule schedule, based on the iCalendar standard
+ * ([RFC 5545](https://datatracker.ietf.org/doc/html/rfc5545)) as
+ * implemented in `dateutil.rrule`.
+ *
+ * RRules are appropriate for any kind of calendar-date manipulation, including
+ * irregular intervals, repetition, exclusions, week day or day-of-month
+ * adjustments, and more.
+ *
+ * Note that as a calendar-oriented standard, `RRuleSchedules` are sensitive
+ * to the initial timezone provided. A 9am daily schedule with a daylight saving
+ * time-aware start date will maintain a local 9am time through DST boundaries;
+ * a 9am daily schedule with a UTC start date will maintain a 9am UTC time.
+ *
+ * Args:
+ * rrule (str): a valid RRule string
+ * timezone (str, optional): a valid timezone string
+ */
+ RRuleSchedule: {
+ /** Rrule */
+ rrule: string;
+ /**
+ * Timezone
+ * @default UTC
+ */
+ timezone: string | null;
+ };
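+ // Illustrative example (editor's sketch): a 9am weekday schedule expressed
+ // as an RFC 5545 RRULE string, localized so it tracks DST per the note above:
+ //
+ //   const weekdays: components["schemas"]["RRuleSchedule"] = {
+ //     rrule: "FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR;BYHOUR=9;BYMINUTE=0",
+ //     timezone: "America/New_York",
+ //   };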
+ /**
+ * ReceivedEvent
+ * @description The server-side view of an event that has happened to a Resource after it has
+ * been received by the server
+ */
+ ReceivedEvent: {
+ /**
+ * Occurred
+ * Format: date-time
+ * @description When the event happened from the sender's perspective
+ */
+ occurred: string;
+ /**
+ * Event
+ * @description The name of the event that happened
+ */
+ event: string;
+ /** @description The primary Resource this event concerns */
+ resource: components["schemas"]["Resource"];
+ /**
+ * Related
+ * @description A list of additional Resources involved in this event
+ */
+ related?: components["schemas"]["RelatedResource"][];
+ /**
+ * Payload
+ * @description An open-ended set of data describing what happened
+ */
+ payload?: Record<string, never>;
+ /**
+ * Id
+ * Format: uuid
+ * @description The client-provided identifier of this event
+ */
+ id: string;
+ /**
+ * Follows
+ * @description The ID of an event that is known to have occurred prior to this one. If set, this may be used to establish a more precise ordering of causally-related events when they occur close enough together in time that the system may receive them out-of-order.
+ */
+ follows?: string | null;
+ /**
+ * Received
+ * Format: date-time
+ * @description When the event was received by Prefect Cloud
+ */
+ received?: string;
+ };
+ /**
+ * RelatedResource
+ * @description A Resource with a specific role in an Event
+ */
+ RelatedResource: {
+ [key: string]: string;
+ };
+ /**
+ * Resource
+ * @description An observable business object of interest to the user
+ */
+ Resource: {
+ [key: string]: string;
+ };
+ /** ResourceSpecification */
+ ResourceSpecification: {
+ [key: string]: string | string[];
+ };
+ /**
+ * ResumeAutomation
+ * @description Resumes an Automation
+ */
+ ResumeAutomation: {
+ /**
+ * Type
+ * @default resume-automation
+ * @constant
+ * @enum {string}
+ */
+ type: "resume-automation";
+ /**
+ * Source
+ * @description Whether this Action applies to a specific selected automation (given by `automation_id`), or to an automation that is inferred from the triggering event. If the source is 'inferred', the `automation_id` may not be set. If the source is 'selected', the `automation_id` must be set.
+ * @default selected
+ * @enum {string}
+ */
+ source: "selected" | "inferred";
+ /**
+ * Automation Id
+ * @description The identifier of the automation to act on
+ */
+ automation_id?: string | null;
+ };
+ /**
+ * ResumeDeployment
+ * @description Resumes the given Deployment
+ */
+ ResumeDeployment: {
+ /**
+ * Type
+ * @default resume-deployment
+ * @constant
+ * @enum {string}
+ */
+ type: "resume-deployment";
+ /**
+ * Source
+ * @description Whether this Action applies to a specific selected deployment (given by `deployment_id`), or to a deployment that is inferred from the triggering event. If the source is 'inferred', the `deployment_id` may not be set. If the source is 'selected', the `deployment_id` must be set.
+ * @default selected
+ * @enum {string}
+ */
+ source: "selected" | "inferred";
+ /**
+ * Deployment Id
+ * @description The identifier of the deployment
+ */
+ deployment_id?: string | null;
+ };
+ /**
+ * ResumeFlowRun
+ * @description Resumes a paused or suspended flow run associated with the trigger
+ */
+ ResumeFlowRun: {
+ /**
+ * Type
+ * @default resume-flow-run
+ * @constant
+ * @enum {string}
+ */
+ type: "resume-flow-run";
+ };
+ /**
+ * ResumeWorkPool
+ * @description Resumes a Work Pool
+ */
+ ResumeWorkPool: {
+ /**
+ * Type
+ * @default resume-work-pool
+ * @constant
+ * @enum {string}
+ */
+ type: "resume-work-pool";
+ /**
+ * Source
+ * @description Whether this Action applies to a specific selected work pool (given by `work_pool_id`), or to a work pool that is inferred from the triggering event. If the source is 'inferred', the `work_pool_id` may not be set. If the source is 'selected', the `work_pool_id` must be set.
+ * @default selected
+ * @enum {string}
+ */
+ source: "selected" | "inferred";
+ /**
+ * Work Pool Id
+ * @description The identifier of the work pool to resume
+ */
+ work_pool_id?: string | null;
+ };
+ /**
+ * ResumeWorkQueue
+ * @description Resumes a Work Queue
+ */
+ ResumeWorkQueue: {
+ /**
+ * Type
+ * @default resume-work-queue
+ * @constant
+ * @enum {string}
+ */
+ type: "resume-work-queue";
+ /**
+ * Source
+ * @description Whether this Action applies to a specific selected work queue (given by `work_queue_id`), or to a work queue that is inferred from the triggering event. If the source is 'inferred', the `work_queue_id` may not be set. If the source is 'selected', the `work_queue_id` must be set.
+ * @default selected
+ * @enum {string}
+ */
+ source: "selected" | "inferred";
+ /**
+ * Work Queue Id
+ * @description The identifier of the work queue to resume
+ */
+ work_queue_id?: string | null;
+ };
+ /**
+ * RunDeployment
+ * @description Runs the given deployment with the given parameters
+ */
+ RunDeployment: {
+ /**
+ * Type
+ * @default run-deployment
+ * @constant
+ * @enum {string}
+ */
+ type: "run-deployment";
+ /**
+ * Source
+ * @description Whether this Action applies to a specific selected deployment (given by `deployment_id`), or to a deployment that is inferred from the triggering event. If the source is 'inferred', the `deployment_id` may not be set. If the source is 'selected', the `deployment_id` must be set.
+ * @default selected
+ * @enum {string}
+ */
+ source: "selected" | "inferred";
+ /**
+ * Deployment Id
+ * @description The identifier of the deployment
+ */
+ deployment_id?: string | null;
+ /**
+ * Parameters
+ * @description The parameters to pass to the deployment, or None to use the deployment's default parameters
+ */
+ parameters?: Record<string, never> | null;
+ /**
+ * Job Variables
+ * @description The job variables to pass to the created flow run, or None to use the deployment's default job variables
+ */
+ job_variables?: Record<string, never> | null;
+ };
+ /**
+ * SavedSearch
+ * @description An ORM representation of saved search data. Represents a set of filter criteria.
+ */
+ SavedSearch: {
+ /**
+ * Id
+ * Format: uuid
+ */
+ id?: string;
+ /** Created */
+ created?: string | null;
+ /** Updated */
+ updated?: string | null;
+ /**
+ * Name
+ * @description The name of the saved search.
+ */
+ name: string;
+ /**
+ * Filters
+ * @description The filter set for the saved search.
+ */
+ filters?: components["schemas"]["SavedSearchFilter"][];
+ };
+ /**
+ * SavedSearchCreate
+ * @description Data used by the Prefect REST API to create a saved search.
+ */
+ SavedSearchCreate: {
+ /**
+ * Name
+ * @description The name of the saved search.
+ */
+ name: string;
+ /**
+ * Filters
+ * @description The filter set for the saved search.
+ */
+ filters?: components["schemas"]["SavedSearchFilter"][];
+ };
+ /**
+ * SavedSearchFilter
+ * @description A filter for a saved search model. Intended for use by the Prefect UI.
+ */
+ SavedSearchFilter: {
+ /**
+ * Object
+ * @description The object over which to filter.
+ */
+ object: string;
+ /**
+ * Property
+ * @description The property of the object on which to filter.
+ */
+ property: string;
+ /**
+ * Type
+ * @description The type of the property.
+ */
+ type: string;
+ /**
+ * Operation
+ * @description The operator to apply to the object. For example, `equals`.
+ */
+ operation: string;
+ /**
+ * Value
+ * @description A JSON-compatible value for the filter.
+ */
+ value: unknown;
+ };
+ /**
+ * SendNotification
+ * @description Send a notification when an Automation is triggered
+ */
+ SendNotification: {
+ /**
+ * Type
+ * @default send-notification
+ * @constant
+ * @enum {string}
+ */
+ type: "send-notification";
+ /**
+ * Block Document Id
+ * Format: uuid
+ * @description The identifier of the notification block to use
+ */
+ block_document_id: string;
+ /**
+ * Subject
+ * @default Prefect automated notification
+ */
+ subject: string;
+ /**
+ * Body
+ * @description The text of the notification to send
+ */
+ body: string;
+ };
+ /**
+ * SequenceTrigger
+ * @description A composite trigger that requires some number of triggers to have fired
+ * within the given time period in a specific order
+ */
+ "SequenceTrigger-Input": {
+ /**
+ * Type
+ * @default sequence
+ * @constant
+ * @enum {string}
+ */
+ type: "sequence";
+ /**
+ * Id
+ * Format: uuid
+ * @description The unique ID of this trigger
+ */
+ id?: string;
+ /** Triggers */
+ triggers: (
+ | components["schemas"]["EventTrigger"]
+ | components["schemas"]["CompoundTrigger-Input"]
+ | components["schemas"]["SequenceTrigger-Input"]
+ )[];
+ /** Within */
+ within: number | null;
+ };
+ /**
+ * SequenceTrigger
+ * @description A composite trigger that requires some number of triggers to have fired
+ * within the given time period in a specific order
+ */
+ "SequenceTrigger-Output": {
+ /**
+ * Type
+ * @default sequence
+ * @constant
+ * @enum {string}
+ */
+ type: "sequence";
+ /**
+ * Id
+ * Format: uuid
+ * @description The unique ID of this trigger
+ */
+ id?: string;
+ /** Triggers */
+ triggers: (
+ | components["schemas"]["EventTrigger"]
+ | components["schemas"]["CompoundTrigger-Output"]
+ | components["schemas"]["SequenceTrigger-Output"]
+ )[];
+ /** Within */
+ within: number | null;
+ };
+ /**
+ * SetStateStatus
+ * @description Enumerates return statuses for setting run states.
+ * @enum {string}
+ */
+ SetStateStatus: "ACCEPT" | "REJECT" | "ABORT" | "WAIT";
+ /**
+ * Settings
+ * @description Settings for Prefect using Pydantic settings.
+ *
+ * See https://docs.pydantic.dev/latest/concepts/pydantic_settings
+ */
+ Settings: unknown;
+ /** SimpleFlowRun */
+ SimpleFlowRun: {
+ /**
+ * Id
+ * Format: uuid
+ * @description The flow run id.
+ */
+ id: string;
+ /** @description The state type. */
+ state_type: components["schemas"]["StateType"];
+ /**
+ * Timestamp
+ * Format: date-time
+ * @description The start time of the run, or the expected start time if it hasn't run yet.
+ */
+ timestamp: string;
+ /**
+ * Duration
+ * @description The total run time of the run.
+ */
+ duration: number;
+ /**
+ * Lateness
+ * @description The delay between the expected and actual start time.
+ */
+ lateness: number;
+ };
+ /** SimpleNextFlowRun */
+ SimpleNextFlowRun: {
+ /**
+ * Id
+ * Format: uuid
+ * @description The flow run id.
+ */
+ id: string;
+ /**
+ * Flow Id
+ * Format: uuid
+ * @description The flow id.
+ */
+ flow_id: string;
+ /**
+ * Name
+ * @description The flow run name
+ */
+ name: string;
+ /**
+ * State Name
+ * @description The state name.
+ */
+ state_name: string;
+ /** @description The state type. */
+ state_type: components["schemas"]["StateType"];
+ /**
+ * Next Scheduled Start Time
+ * Format: date-time
+ * @description The next scheduled start time
+ */
+ next_scheduled_start_time: string;
+ };
+ /**
+ * State
+ * @description Represents the state of a run.
+ */
+ State: {
+ /**
+ * Id
+ * Format: uuid
+ */
+ id?: string;
+ type: components["schemas"]["StateType"];
+ /** Name */
+ name?: string | null;
+ /**
+ * Timestamp
+ * Format: date-time
+ */
+ timestamp?: string;
+ /** Message */
+ message?: string | null;
+ /**
+ * Data
+ * @description Data associated with the state, e.g. a result. Content must be storable as JSON.
+ */
+ data?: unknown | null;
+ state_details?: components["schemas"]["StateDetails"];
+ };
+ /**
+ * StateAbortDetails
+ * @description Details associated with an ABORT state transition.
+ */
+ StateAbortDetails: {
+ /**
+ * Type
+ * @description The type of state transition detail. Used to ensure pydantic does not coerce into a different type.
+ * @default abort_details
+ * @constant
+ * @enum {string}
+ */
+ type: "abort_details";
+ /**
+ * Reason
+ * @description The reason why the state transition was aborted.
+ */
+ reason?: string | null;
+ };
+ /**
+ * StateAcceptDetails
+ * @description Details associated with an ACCEPT state transition.
+ */
+ StateAcceptDetails: {
+ /**
+ * Type
+ * @description The type of state transition detail. Used to ensure pydantic does not coerce into a different type.
+ * @default accept_details
+ * @constant
+ * @enum {string}
+ */
+ type: "accept_details";
+ };
+ /**
+ * StateCreate
+ * @description Data used by the Prefect REST API to create a new state.
+ */
+ StateCreate: {
+ /** @description The type of the state to create */
+ type: components["schemas"]["StateType"];
+ /**
+ * Name
+ * @description The name of the state to create
+ */
+ name?: string | null;
+ /**
+ * Message
+ * @description The message of the state to create
+ */
+ message?: string | null;
+ /**
+ * Data
+ * @description The data of the state to create
+ */
+ data?: unknown | null;
+ /** @description The details of the state to create */
+ state_details?: components["schemas"]["StateDetails"];
+ };
+ /** StateDetails */
+ StateDetails: {
+ /** Flow Run Id */
+ flow_run_id?: string | null;
+ /** Task Run Id */
+ task_run_id?: string | null;
+ /** Child Flow Run Id */
+ child_flow_run_id?: string | null;
+ /** Scheduled Time */
+ scheduled_time?: string | null;
+ /** Cache Key */
+ cache_key?: string | null;
+ /** Cache Expiration */
+ cache_expiration?: string | null;
+ /**
+ * Deferred
+ * @default false
+ */
+ deferred: boolean | null;
+ /**
+ * Untrackable Result
+ * @default false
+ */
+ untrackable_result: boolean;
+ /** Pause Timeout */
+ pause_timeout?: string | null;
+ /**
+ * Pause Reschedule
+ * @default false
+ */
+ pause_reschedule: boolean;
+ /** Pause Key */
+ pause_key?: string | null;
+ /** Run Input Keyset */
+ run_input_keyset?: {
+ [key: string]: string;
+ } | null;
+ /** Refresh Cache */
+ refresh_cache?: boolean | null;
+ /** Retriable */
+ retriable?: boolean | null;
+ /** Transition Id */
+ transition_id?: string | null;
+ /** Task Parameters Id */
+ task_parameters_id?: string | null;
+ };
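+ // Illustrative example (editor's sketch): a StateCreate payload for a
+ // SCHEDULED state; the boolean StateDetails fields are non-optional in this
+ // generated type, so they are spelled out here:
+ //
+ //   const scheduled: components["schemas"]["StateCreate"] = {
+ //     type: "SCHEDULED",
+ //     name: "Scheduled",
+ //     state_details: {
+ //       deferred: false,
+ //       untrackable_result: false,
+ //       pause_reschedule: false,
+ //       scheduled_time: "2024-01-01T09:00:00Z",
+ //     },
+ //   };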
+ /**
+ * StateRejectDetails
+ * @description Details associated with a REJECT state transition.
+ */
+ StateRejectDetails: {
+ /**
+ * Type
+ * @description The type of state transition detail. Used to ensure pydantic does not coerce into a different type.
+ * @default reject_details
+ * @constant
+ * @enum {string}
+ */
+ type: "reject_details";
+ /**
+ * Reason
+ * @description The reason why the state transition was rejected.
+ */
+ reason?: string | null;
+ };
+ /**
+ * StateType
+ * @description Enumeration of state types.
+ * @enum {string}
+ */
+ StateType:
+ | "SCHEDULED"
+ | "PENDING"
+ | "RUNNING"
+ | "COMPLETED"
+ | "FAILED"
+ | "CANCELLED"
+ | "CRASHED"
+ | "PAUSED"
+ | "CANCELLING";
+ /**
+ * StateWaitDetails
+ * @description Details associated with a WAIT state transition.
+ */
+ StateWaitDetails: {
+ /**
+ * Type
+ * @description The type of state transition detail. Used to ensure pydantic does not coerce into a different type.
+ * @default wait_details
+ * @constant
+ * @enum {string}
+ */
+ type: "wait_details";
+ /**
+ * Delay Seconds
+ * @description The length of time in seconds the client should wait before transitioning states.
+ */
+ delay_seconds: number;
+ /**
+ * Reason
+ * @description The reason why the state transition should wait.
+ */
+ reason?: string | null;
+ };
+ /**
+ * SuspendFlowRun
+ * @description Suspends a flow run associated with the trigger
+ */
+ SuspendFlowRun: {
+ /**
+ * Type
+ * @default suspend-flow-run
+ * @constant
+ * @enum {string}
+ */
+ type: "suspend-flow-run";
+ };
+ /**
+ * TaskRun
+ * @description An ORM representation of task run data.
+ */
+ TaskRun: {
+ /**
+ * Id
+ * Format: uuid
+ */
+ id?: string;
+ /** Created */
+ created?: string | null;
+ /** Updated */
+ updated?: string | null;
+ /** Name */
+ name?: string;
+ /**
+ * Flow Run Id
+ * @description The flow run id of the task run.
+ */
+ flow_run_id?: string | null;
+ /**
+ * Task Key
+ * @description A unique identifier for the task being run.
+ */
+ task_key: string;
+ /**
+ * Dynamic Key
+ * @description A dynamic key used to differentiate between multiple runs of the same task within the same flow run.
+ */
+ dynamic_key: string;
+ /**
+ * Cache Key
+ * @description An optional cache key. If a COMPLETED state associated with this cache key is found, the cached COMPLETED state will be used instead of executing the task run.
+ */
+ cache_key?: string | null;
+ /**
+ * Cache Expiration
+ * @description Specifies when the cached state should expire.
+ */
+ cache_expiration?: string | null;
+ /**
+ * Task Version
+ * @description The version of the task being run.
+ */
+ task_version?: string | null;
+ empirical_policy?: components["schemas"]["TaskRunPolicy"];
+ /**
+ * Tags
+ * @description A list of tags for the task run.
+ */
+ tags?: string[];
+ /**
+ * State Id
+ * @description The id of the current task run state.
+ */
+ state_id?: string | null;
+ /**
+ * Task Inputs
+ * @description Tracks the source of inputs to a task run. Used for internal bookkeeping.
+ */
+ task_inputs?: {
+ [key: string]: (
+ | components["schemas"]["TaskRunResult"]
+ | components["schemas"]["Parameter"]
+ | components["schemas"]["Constant"]
+ )[];
+ };
+ /** @description The type of the current task run state. */
+ state_type?: components["schemas"]["StateType"] | null;
+ /**
+ * State Name
+ * @description The name of the current task run state.
+ */
+ state_name?: string | null;
+ /**
+ * Run Count
+ * @description The number of times the task run has been executed.
+ * @default 0
+ */
+ run_count: number;
+ /**
+ * Flow Run Run Count
+ * @description If the parent flow has retried, this indicates the flow retry this run is associated with.
+ * @default 0
+ */
+ flow_run_run_count: number;
+ /**
+ * Expected Start Time
+ * @description The task run's expected start time.
+ */
+ expected_start_time?: string | null;
+ /**
+ * Next Scheduled Start Time
+ * @description The next time the task run is scheduled to start.
+ */
+ next_scheduled_start_time?: string | null;
+ /**
+ * Start Time
+ * @description The actual start time.
+ */
+ start_time?: string | null;
+ /**
+ * End Time
+ * @description The actual end time.
+ */
+ end_time?: string | null;
+ /**
+ * Total Run Time
+ * @description Total run time. If the task run was executed multiple times, the time of each run will be summed.
+ * @default 0
+ */
+ total_run_time: number;
+ /**
+ * Estimated Run Time
+ * @description A real-time estimate of total run time.
+ * @default 0
+ */
+ estimated_run_time: number;
+ /**
+ * Estimated Start Time Delta
+ * @description The difference between actual and expected start time.
+ * @default 0
+ */
+ estimated_start_time_delta: number;
+ /** @description The current task run state. */
+ state?: components["schemas"]["State"] | null;
+ };
+ /** TaskRunCount */
+ TaskRunCount: Record<string, never>;
+ /**
+ * TaskRunCreate
+ * @description Data used by the Prefect REST API to create a task run
+ */
+ TaskRunCreate: {
+ /**
+ * Id
+ * @description The ID to assign to the task run. If not provided, a random UUID will be generated.
+ */
+ id?: string | null;
+ /** @description The state of the task run to create */
+ state?: components["schemas"]["StateCreate"] | null;
+ /** Name */
+ name?: string;
+ /**
+ * Flow Run Id
+ * @description The flow run id of the task run.
+ */
+ flow_run_id?: string | null;
+ /**
+ * Task Key
+ * @description A unique identifier for the task being run.
+ */
+ task_key: string;
+ /**
+ * Dynamic Key
+ * @description A dynamic key used to differentiate between multiple runs of the same task within the same flow run.
+ */
+ dynamic_key: string;
+ /**
+ * Cache Key
+ * @description An optional cache key. If a COMPLETED state associated with this cache key is found, the cached COMPLETED state will be used instead of executing the task run.
+ */
+ cache_key?: string | null;
+ /**
+ * Cache Expiration
+ * @description Specifies when the cached state should expire.
+ */
+ cache_expiration?: string | null;
+ /**
+ * Task Version
+ * @description The version of the task being run.
+ */
+ task_version?: string | null;
+ empirical_policy?: components["schemas"]["TaskRunPolicy"];
+ /**
+ * Tags
+ * @description A list of tags for the task run.
+ */
+ tags?: string[];
+ /**
+ * Task Inputs
+ * @description The inputs to the task run.
+ */
+ task_inputs?: {
+ [key: string]: (
+ | components["schemas"]["TaskRunResult"]
+ | components["schemas"]["Parameter"]
+ | components["schemas"]["Constant"]
+ )[];
+ };
+ };
+ /**
+ * TaskRunFilter
+ * @description Filter task runs. Only task runs matching all criteria will be returned
+ */
+ TaskRunFilter: {
+ /**
+ * @description Operator for combining filter criteria. Defaults to 'and_'.
+ * @default and_
+ */
+ operator: components["schemas"]["Operator"];
+ /** @description Filter criteria for `TaskRun.id` */
+ id?: components["schemas"]["TaskRunFilterId"] | null;
+ /** @description Filter criteria for `TaskRun.name` */
+ name?: components["schemas"]["TaskRunFilterName"] | null;
+ /** @description Filter criteria for `TaskRun.tags` */
+ tags?: components["schemas"]["TaskRunFilterTags"] | null;
+ /** @description Filter criteria for `TaskRun.state` */
+ state?: components["schemas"]["TaskRunFilterState"] | null;
+ /** @description Filter criteria for `TaskRun.start_time` */
+ start_time?: components["schemas"]["TaskRunFilterStartTime"] | null;
+ /** @description Filter criteria for `TaskRun.expected_start_time` */
+ expected_start_time?:
+ | components["schemas"]["TaskRunFilterExpectedStartTime"]
+ | null;
+ /** @description Filter criteria for `TaskRun.subflow_run` */
+ subflow_runs?: components["schemas"]["TaskRunFilterSubFlowRuns"] | null;
+ /** @description Filter criteria for `TaskRun.flow_run_id` */
+ flow_run_id?: components["schemas"]["TaskRunFilterFlowRunId"] | null;
+ };
+ /**
+ * TaskRunFilterExpectedStartTime
+ * @description Filter by `TaskRun.expected_start_time`.
+ */
+ TaskRunFilterExpectedStartTime: {
+ /**
+ * Before
+ * @description Only include task runs expected to start at or before this time
+ */
+ before_?: string | null;
+ /**
+ * After
+ * @description Only include task runs expected to start at or after this time
+ */
+ after_?: string | null;
+ };
+ /**
+ * TaskRunFilterFlowRunId
+ * @description Filter by `TaskRun.flow_run_id`.
+ */
+ TaskRunFilterFlowRunId: {
+ /**
+ * @description Operator for combining filter criteria. Defaults to 'and_'.
+ * @default and_
+ */
+ operator: components["schemas"]["Operator"];
+ /**
+ * Any
+ * @description A list of task run flow run ids to include
+ */
+ any_?: string[] | null;
+ /**
+ * Is Null
+ * @description Filter for task runs with None as their flow run id
+ * @default false
+ */
+ is_null_: boolean | null;
+ };
+ /**
+ * TaskRunFilterId
+ * @description Filter by `TaskRun.id`.
+ */
+ TaskRunFilterId: {
+ /**
+ * Any
+ * @description A list of task run ids to include
+ */
+ any_?: string[] | null;
+ };
+ /**
+ * TaskRunFilterName
+ * @description Filter by `TaskRun.name`.
+ */
+ TaskRunFilterName: {
+ /**
+ * Any
+ * @description A list of task run names to include
+ */
+ any_?: string[] | null;
+ /**
+ * Like
+ * @description A case-insensitive partial match. For example, passing 'marvin' will match 'marvin', 'sad-Marvin', and 'marvin-robot'.
+ */
+ like_?: string | null;
+ };
+ /**
+ * TaskRunFilterStartTime
+ * @description Filter by `TaskRun.start_time`.
+ */
+ TaskRunFilterStartTime: {
+ /**
+ * Before
+ * @description Only include task runs starting at or before this time
+ */
+ before_?: string | null;
+ /**
+ * After
+ * @description Only include task runs starting at or after this time
+ */
+ after_?: string | null;
+ /**
+ * Is Null
+ * @description If true, only return task runs without a start time
+ */
+ is_null_?: boolean | null;
+ };
+ /**
+ * TaskRunFilterState
+ * @description Filter by `TaskRun.state_type` and `TaskRun.state_name`.
+ */
+ TaskRunFilterState: {
+ /**
+ * @description Operator for combining filter criteria. Defaults to 'and_'.
+ * @default and_
+ */
+ operator: components["schemas"]["Operator"];
+ /** @description Filter criteria for `TaskRun.state_type` */
+ type?: components["schemas"]["TaskRunFilterStateType"] | null;
+ /** @description Filter criteria for `TaskRun.state_name` */
+ name?: components["schemas"]["TaskRunFilterStateName"] | null;
+ };
+ /**
+ * TaskRunFilterStateName
+ * @description Filter by `TaskRun.state_name`.
+ */
+ TaskRunFilterStateName: {
+ /**
+ * Any
+ * @description A list of task run state names to include
+ */
+ any_?: string[] | null;
+ };
+ /**
+ * TaskRunFilterStateType
+ * @description Filter by `TaskRun.state_type`.
+ */
+ TaskRunFilterStateType: {
+ /**
+ * Any
+ * @description A list of task run state types to include
+ */
+ any_?: components["schemas"]["StateType"][] | null;
+ };
+ /**
+ * TaskRunFilterSubFlowRuns
+ * @description Filter by `TaskRun.subflow_run`.
+ */
+ TaskRunFilterSubFlowRuns: {
+ /**
+ * Exists
+ * @description If true, only include task runs that are subflow run parents; if false, exclude parent task runs
+ */
+ exists_?: boolean | null;
+ };
+ /**
+ * TaskRunFilterTags
+ * @description Filter by `TaskRun.tags`.
+ */
+ TaskRunFilterTags: {
+ /**
+ * @description Operator for combining filter criteria. Defaults to 'and_'.
+ * @default and_
+ */
+ operator: components["schemas"]["Operator"];
+ /**
+ * All
+ * @description A list of tags. Task runs will be returned only if their tags are a superset of the list
+ */
+ all_?: string[] | null;
+ /**
+ * Is Null
+ * @description If true, only include task runs without tags
+ */
+ is_null_?: boolean | null;
+ };
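+ // Illustrative example (editor's sketch): composing the task run filters
+ // above to select failed or crashed runs tagged 'etl':
+ //
+ //   const failedEtl: components["schemas"]["TaskRunFilter"] = {
+ //     operator: "and_",
+ //     state: { operator: "and_", type: { any_: ["FAILED", "CRASHED"] } },
+ //     tags: { operator: "and_", all_: ["etl"] },
+ //   };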
+ /**
+ * TaskRunPolicy
+ * @description Defines how a task run should retry.
+ */
+ TaskRunPolicy: {
+ /**
+ * Max Retries
+ * @deprecated
+ * @description The maximum number of retries. Field is not used. Please use `retries` instead.
+ * @default 0
+ */
+ max_retries: number;
+ /**
+ * Retry Delay Seconds
+ * @deprecated
+ * @description The delay between retries. Field is not used. Please use `retry_delay` instead.
+ * @default 0
+ */
+ retry_delay_seconds: number;
+ /**
+ * Retries
+ * @description The number of retries.
+ */
+ retries?: number | null;
+ /**
+ * Retry Delay
+ * @description A delay time or list of delay times between retries, in seconds.
+ */
+ retry_delay?: number | number[] | null;
+ /**
+ * Retry Jitter Factor
+ * @description Determines the amount a retry should jitter
+ */
+ retry_jitter_factor?: number | null;
+ };
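+ // Illustrative example (editor's sketch): three retries with a growing
+ // per-attempt delay plus jitter. The deprecated fields are non-optional in
+ // this generated type, so they are set to their documented defaults:
+ //
+ //   const policy: components["schemas"]["TaskRunPolicy"] = {
+ //     max_retries: 0, // deprecated, unused
+ //     retry_delay_seconds: 0, // deprecated, unused
+ //     retries: 3,
+ //     retry_delay: [10, 60, 600], // seconds before attempts 1, 2, 3
+ //     retry_jitter_factor: 0.2,
+ //   };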
+ /**
+ * TaskRunResult
+ * @description Represents a task run result input to another task run.
+ */
+ TaskRunResult: {
+ /**
+ * Input Type
+ * @default task_run
+ * @constant
+ * @enum {string}
+ */
+ input_type: "task_run";
+ /**
+ * Id
+ * Format: uuid
+ */
+ id: string;
+ };
+ /**
+ * TaskRunSort
+ * @description Defines task run sorting options.
+ * @enum {string}
+ */
+ TaskRunSort:
+ | "ID_DESC"
+ | "EXPECTED_START_TIME_ASC"
+ | "EXPECTED_START_TIME_DESC"
+ | "NAME_ASC"
+ | "NAME_DESC"
+ | "NEXT_SCHEDULED_START_TIME_ASC"
+ | "END_TIME_DESC";
+ /**
+ * TaskRunUpdate
+ * @description Data used by the Prefect REST API to update a task run
+ */
+ TaskRunUpdate: {
+ /** Name */
+ name?: string;
+ };
+ /** TaskWorkerFilter */
+ TaskWorkerFilter: {
+ /** Task Keys */
+ task_keys: string[];
+ };
+ /** TaskWorkerResponse */
+ TaskWorkerResponse: {
+ /** Identifier */
+ identifier: string;
+ /** Task Keys */
+ task_keys: string[];
+ /**
+ * Timestamp
+ * Format: date-time
+ */
+ timestamp: string;
+ };
+ /**
+ * TimeUnit
+ * @enum {string}
+ */
+ TimeUnit: "week" | "day" | "hour" | "minute" | "second";
+ /** UpdatedBy */
+ UpdatedBy: {
+ /**
+ * Id
+ * @description The id of the updater of the object.
+ */
+ id?: string | null;
+ /**
+ * Type
+ * @description The type of the updater of the object.
+ */
+ type?: string | null;
+ /**
+ * Display Value
+ * @description The display value for the updater.
+ */
+ display_value?: string | null;
+ };
+ /** ValidationError */
+ ValidationError: {
+ /** Location */
+ loc: (string | number)[];
+ /** Message */
+ msg: string;
+ /** Error Type */
+ type: string;
+ };
+ /** Variable */
+ Variable: {
+ /**
+ * Id
+ * Format: uuid
+ */
+ id?: string;
+ /** Created */
+ created?: string | null;
+ /** Updated */
+ updated?: string | null;
+ /**
+ * Name
+ * @description The name of the variable
+ */
+ name: string;
+ /**
+ * Value
+ * @description The value of the variable
+ */
+ value:
+ | string
+ | number
+ | boolean
+ | Record<string, never>
+ | unknown[]
+ | null;
+ /**
+ * Tags
+ * @description A list of variable tags
+ */
+ tags?: string[];
+ };
+ /**
+ * VariableCreate
+ * @description Data used by the Prefect REST API to create a Variable.
+ */
+ VariableCreate: {
+ /**
+ * Name
+ * @description The name of the variable
+ */
+ name: string;
+ /**
+ * Value
+ * @description The value of the variable
+ */
+ value:
+ | string
+ | number
+ | boolean
+ | Record<string, never>
+ | unknown[]
+ | null;
+ /**
+ * Tags
+ * @description A list of variable tags
+ */
+ tags?: string[];
+ };
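+ // Illustrative example (editor's sketch): a minimal VariableCreate payload;
+ // `value` accepts strings, numbers, booleans, arrays, objects, or null per
+ // the union above:
+ //
+ //   const envVar: components["schemas"]["VariableCreate"] = {
+ //     name: "deploy_env",
+ //     value: "production",
+ //     tags: ["infra"],
+ //   };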
+ /**
+ * VariableFilter
+ * @description Filter variables. Only variables matching all criteria will be returned
+ */
+ VariableFilter: {
+ /**
+ * @description Operator for combining filter criteria. Defaults to 'and_'.
+ * @default and_
+ */
+ operator: components["schemas"]["Operator"];
+ /** @description Filter criteria for `Variable.id` */
+ id?: components["schemas"]["VariableFilterId"] | null;
+ /** @description Filter criteria for `Variable.name` */
+ name?: components["schemas"]["VariableFilterName"] | null;
+ /** @description Filter criteria for `Variable.tags` */
+ tags?: components["schemas"]["VariableFilterTags"] | null;
+ };
+ /**
+ * VariableFilterId
+ * @description Filter by `Variable.id`.
+ */
+ VariableFilterId: {
+ /**
+ * Any
+ * @description A list of variable ids to include
+ */
+ any_?: string[] | null;
+ };
+ /**
+ * VariableFilterName
+ * @description Filter by `Variable.name`.
+ */
+ VariableFilterName: {
+ /**
+ * Any
+ * @description A list of variable names to include
+ */
+ any_?: string[] | null;
+ /**
+ * Like
+ * @description A string to match variable names against. This can include SQL wildcard characters like `%` and `_`.
+ */
+ like_?: string | null;
+ };
+ /**
+ * VariableFilterTags
+ * @description Filter by `Variable.tags`.
+ */
+ VariableFilterTags: {
+ /**
+ * @description Operator for combining filter criteria. Defaults to 'and_'.
+ * @default and_
+ */
+ operator: components["schemas"]["Operator"];
+ /**
+ * All
+ * @description A list of tags. Variables will be returned only if their tags are a superset of the list
+ */
+ all_?: string[] | null;
+ /**
+ * Is Null
+ * @description If true, only include Variables without tags
+ */
+ is_null_?: boolean | null;
+ };
+ /**
+ * VariableSort
+ * @description Defines variables sorting options.
+ * @enum {string}
+ */
+ VariableSort: "CREATED_DESC" | "UPDATED_DESC" | "NAME_DESC" | "NAME_ASC";
+ /**
+ * VariableUpdate
+ * @description Data used by the Prefect REST API to update a Variable.
+ */
+ VariableUpdate: {
+ /**
+ * Name
+ * @description The name of the variable
+ */
+ name?: string | null;
+ /**
+ * Value
+ * @description The value of the variable
+ */
+ value?:
+ | string
+ | number
+ | boolean
+ | Record<string, never>
+ | unknown[]
+ | null;
+ /**
+ * Tags
+ * @description A list of variable tags
+ */
+ tags?: string[] | null;
+ };
+ /**
+ * WorkPool
+ * @description An ORM representation of a work pool
+ */
+ WorkPool: {
+ /**
+ * Id
+ * Format: uuid
+ */
+ id?: string;
+ /** Created */
+ created?: string | null;
+ /** Updated */
+ updated?: string | null;
+ /**
+ * Name
+ * @description The name of the work pool.
+ */
+ name: string;
+ /**
+ * Description
+ * @description A description of the work pool.
+ */
+ description?: string | null;
+ /**
+ * Type
+ * @description The work pool type.
+ */
+ type: string;
+ /**
+ * Base Job Template
+ * @description The work pool's base job template.
+ */
+ base_job_template?: Record<string, never>;
+ /**
+ * Is Paused
+ * @description Pausing the work pool stops the delivery of all work.
+ * @default false
+ */
+ is_paused: boolean;
+ /**
+ * Concurrency Limit
+ * @description A concurrency limit for the work pool.
+ */
+ concurrency_limit?: number | null;
+ /** @description The current status of the work pool. */
+ status?: components["schemas"]["WorkPoolStatus"] | null;
+ /**
+ * Default Queue Id
+ * @description The id of the pool's default queue.
+ */
+ default_queue_id?: string | null;
+ };
+ /**
+ * WorkPoolCreate
+ * @description Data used by the Prefect REST API to create a work pool.
+ */
+ WorkPoolCreate: {
+ /**
+ * Name
+ * @description The name of the work pool.
+ */
+ name: string;
+ /**
+ * Description
+ * @description The work pool description.
+ */
+ description?: string | null;
+ /**
+ * Type
+ * @description The work pool type.
+ * @default prefect-agent
+ */
+ type: string;
+ /**
+ * Base Job Template
+ * @description The work pool's base job template.
+ */
+ base_job_template?: Record<string, never>;
+ /**
+ * Is Paused
+ * @description Pausing the work pool stops the delivery of all work.
+ * @default false
+ */
+ is_paused: boolean;
+ /**
+ * Concurrency Limit
+ * @description A concurrency limit for the work pool.
+ */
+ concurrency_limit?: number | null;
+ };
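+ // Illustrative example (editor's sketch): creating a work pool. The pool
+ // type "process" is assumed here; the non-optional `type` and `is_paused`
+ // fields must be supplied when constructing this type directly:
+ //
+ //   const pool: components["schemas"]["WorkPoolCreate"] = {
+ //     name: "my-process-pool",
+ //     type: "process",
+ //     is_paused: false,
+ //   };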
+ /**
+ * WorkPoolFilter
+ * @description Filter work pools. Only work pools matching all criteria will be returned
+ */
+ WorkPoolFilter: {
+ /**
+ * @description Operator for combining filter criteria. Defaults to 'and_'.
+ * @default and_
+ */
+ operator: components["schemas"]["Operator"];
+ /** @description Filter criteria for `WorkPool.id` */
+ id?: components["schemas"]["WorkPoolFilterId"] | null;
+ /** @description Filter criteria for `WorkPool.name` */
+ name?: components["schemas"]["WorkPoolFilterName"] | null;
+ /** @description Filter criteria for `WorkPool.type` */
+ type?: components["schemas"]["WorkPoolFilterType"] | null;
+ };
+ /**
+ * WorkPoolFilterId
+ * @description Filter by `WorkPool.id`.
+ */
+ WorkPoolFilterId: {
+ /**
+ * Any
+ * @description A list of work pool ids to include
+ */
+ any_?: string[] | null;
+ };
+ /**
+ * WorkPoolFilterName
+ * @description Filter by `WorkPool.name`.
+ */
+ WorkPoolFilterName: {
+ /**
+ * Any
+ * @description A list of work pool names to include
+ */
+ any_?: string[] | null;
+ };
+ /**
+ * WorkPoolFilterType
+ * @description Filter by `WorkPool.type`.
+ */
+ WorkPoolFilterType: {
+ /**
+ * Any
+ * @description A list of work pool types to include
+ */
+ any_?: string[] | null;
+ };
+ /**
+ * WorkPoolStatus
+ * @description Enumeration of work pool statuses.
+ * @enum {string}
+ */
+ WorkPoolStatus: "READY" | "NOT_READY" | "PAUSED";
+ /**
+ * WorkPoolUpdate
+ * @description Data used by the Prefect REST API to update a work pool.
+ */
+ WorkPoolUpdate: {
+ /** Description */
+ description?: string | null;
+ /** Is Paused */
+ is_paused?: boolean | null;
+ /** Base Job Template */
+ base_job_template?: Record<string, never> | null;
+ /** Concurrency Limit */
+ concurrency_limit?: number | null;
+ };
+ /**
+ * WorkQueue
+ * @description An ORM representation of a work queue
+ */
+ WorkQueue: {
+ /**
+ * Id
+ * Format: uuid
+ */
+ id?: string;
+ /** Created */
+ created?: string | null;
+ /** Updated */
+ updated?: string | null;
+ /**
+ * Name
+ * @description The name of the work queue.
+ */
+ name: string;
+ /**
+ * Description
+ * @description An optional description for the work queue.
+ * @default
+ */
+ description: string | null;
+ /**
+ * Is Paused
+ * @description Whether or not the work queue is paused.
+ * @default false
+ */
+ is_paused: boolean;
+ /**
+ * Concurrency Limit
+ * @description An optional concurrency limit for the work queue.
+ */
+ concurrency_limit?: number | null;
+ /**
+ * Priority
+ * @description The queue's priority. Lower values are higher priority (1 is the highest).
+ * @default 1
+ */
+ priority: number;
+ /**
+ * Work Pool Id
+ * @description The work pool with which the queue is associated.
+ */
+ work_pool_id?: string | null;
+ /**
+ * @deprecated
+ * @description DEPRECATED: Filter criteria for the work queue.
+ */
+ filter?: components["schemas"]["QueueFilter"] | null;
+ /**
+ * Last Polled
+ * @description The last time an agent polled this queue for work.
+ */
+ last_polled?: string | null;
+ };
+ /**
+ * WorkQueueCreate
+ * @description Data used by the Prefect REST API to create a work queue.
+ */
+ WorkQueueCreate: {
+ /**
+ * Name
+ * @description The name of the work queue.
+ */
+ name: string;
+ /**
+ * Description
+ * @description An optional description for the work queue.
+ * @default
+ */
+ description: string | null;
+ /**
+ * Is Paused
+ * @description Whether or not the work queue is paused.
+ * @default false
+ */
+ is_paused: boolean;
+ /**
+ * Concurrency Limit
+ * @description The work queue's concurrency limit.
+ */
+ concurrency_limit?: number | null;
+ /**
+ * Priority
+ * @description The queue's priority. Lower values are higher priority (1 is the highest).
+ */
+ priority?: number | null;
+ /**
+ * @deprecated
+ * @description DEPRECATED: Filter criteria for the work queue.
+ */
+ filter?: components["schemas"]["QueueFilter"] | null;
+ };
+ /**
+ * WorkQueueFilter
+ * @description Filter work queues. Only work queues matching all criteria will be
+ * returned
+ */
+ WorkQueueFilter: {
+ /**
+ * @description Operator for combining filter criteria. Defaults to 'and_'.
+ * @default and_
+ */
+ operator: components["schemas"]["Operator"];
+ /** @description Filter criteria for `WorkQueue.id` */
+ id?: components["schemas"]["WorkQueueFilterId"] | null;
+ /** @description Filter criteria for `WorkQueue.name` */
+ name?: components["schemas"]["WorkQueueFilterName"] | null;
+ };
+ /**
+ * WorkQueueFilterId
+ * @description Filter by `WorkQueue.id`.
+ */
+ WorkQueueFilterId: {
+ /**
+ * Any
+ * @description A list of work queue ids to include
+ */
+ any_?: string[] | null;
+ };
+ /**
+ * WorkQueueFilterName
+ * @description Filter by `WorkQueue.name`.
+ */
+ WorkQueueFilterName: {
+ /**
+ * Any
+ * @description A list of work queue names to include
+ */
+ any_?: string[] | null;
+ /**
+ * Startswith
+ * @description A list of case-insensitive starts-with matches. For example, passing 'marvin' will match 'marvin' and 'Marvin-robot', but not 'sad-marvin'.
+ */
+ startswith_?: string[] | null;
+ };
+ /** WorkQueueHealthPolicy */
+ WorkQueueHealthPolicy: {
+ /**
+ * Maximum Late Runs
+ * @description The maximum number of late runs in the work queue before it is deemed unhealthy. Defaults to `0`.
+ * @default 0
+ */
+ maximum_late_runs: number | null;
+ /**
+ * Maximum Seconds Since Last Polled
+ * @description The maximum amount of time in seconds since the work queue was last polled before it is deemed unhealthy. Defaults to `60`.
+ * @default 60
+ */
+ maximum_seconds_since_last_polled: number | null;
+ };
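+ // Illustrative example (editor's sketch): a policy that deems a queue
+ // unhealthy after any late run, or after five minutes without a poll:
+ //
+ //   const healthPolicy: components["schemas"]["WorkQueueHealthPolicy"] = {
+ //     maximum_late_runs: 0,
+ //     maximum_seconds_since_last_polled: 300,
+ //   };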
+ /** WorkQueueResponse */
+ WorkQueueResponse: {
+ /**
+ * Id
+ * Format: uuid
+ */
+ id?: string;
+ /** Created */
+ created?: string | null;
+ /** Updated */
+ updated?: string | null;
+ /**
+ * Name
+ * @description The name of the work queue.
+ */
+ name: string;
+ /**
+ * Description
+ * @description An optional description for the work queue.
+ * @default
+ */
+ description: string | null;
+ /**
+ * Is Paused
+ * @description Whether or not the work queue is paused.
+ * @default false
+ */
+ is_paused: boolean;
+ /**
+ * Concurrency Limit
+ * @description An optional concurrency limit for the work queue.
+ */
+ concurrency_limit?: number | null;
+ /**
+ * Priority
+ * @description The queue's priority. Lower values are higher priority (1 is the highest).
+ * @default 1
+ */
+ priority: number;
+ /**
+ * Work Pool Id
+ * @description The work pool with which the queue is associated.
+ */
+ work_pool_id?: string | null;
+ /**
+ * @deprecated
+ * @description DEPRECATED: Filter criteria for the work queue.
+ */
+ filter?: components["schemas"]["QueueFilter"] | null;
+ /**
+ * Last Polled
+ * @description The last time an agent polled this queue for work.
+ */
+ last_polled?: string | null;
+ /**
+ * Work Pool Name
+ * @description The name of the work pool the work queue resides within.
+ */
+ work_pool_name?: string | null;
+ /** @description The queue status. */
+ status?: components["schemas"]["WorkQueueStatus"] | null;
+ };
+ /**
+ * WorkQueueStatus
+ * @description Enumeration of work queue statuses.
+ * @enum {string}
+ */
+ WorkQueueStatus: "READY" | "NOT_READY" | "PAUSED";
+ /** WorkQueueStatusDetail */
+ WorkQueueStatusDetail: {
+ /**
+ * Healthy
+ * @description Whether or not the work queue is healthy.
+ */
+ healthy: boolean;
+ /**
+ * Late Runs Count
+ * @description The number of late flow runs in the work queue.
+ * @default 0
+ */
+ late_runs_count: number;
+ /**
+ * Last Polled
+ * @description The last time an agent polled this queue for work.
+ */
+ last_polled?: string | null;
+ /** @description The policy used to determine whether or not the work queue is healthy. */
+ health_check_policy: components["schemas"]["WorkQueueHealthPolicy"];
+ };
+ /**
+ * WorkQueueUpdate
+ * @description Data used by the Prefect REST API to update a work queue.
+ */
+ WorkQueueUpdate: {
+ /** Name */
+ name?: string | null;
+ /** Description */
+ description?: string | null;
+ /**
+ * Is Paused
+ * @description Whether or not the work queue is paused.
+ * @default false
+ */
+ is_paused: boolean;
+ /** Concurrency Limit */
+ concurrency_limit?: number | null;
+ /** Priority */
+ priority?: number | null;
+ /** Last Polled */
+ last_polled?: string | null;
+ /**
+ * @deprecated
+ * @description DEPRECATED: Filter criteria for the work queue.
+ */
+ filter?: components["schemas"]["QueueFilter"] | null;
+ };
+ /**
+ * WorkerFilter
+ * @description Filter workers. Only workers matching all criteria will be returned
+ */
+ WorkerFilter: {
+ /**
+ * @description Operator for combining filter criteria. Defaults to 'and_'.
+ * @default and_
+ */
+ operator: components["schemas"]["Operator"];
+ /** @description Filter criteria for `Worker.last_heartbeat_time` */
+ last_heartbeat_time?:
+ | components["schemas"]["WorkerFilterLastHeartbeatTime"]
+ | null;
+ /** @description Filter criteria for `Worker.status` */
+ status?: components["schemas"]["WorkerFilterStatus"] | null;
+ };
+ /**
+ * WorkerFilterLastHeartbeatTime
+ * @description Filter by `Worker.last_heartbeat_time`.
+ */
+ WorkerFilterLastHeartbeatTime: {
+ /**
+ * Before
+ * @description Only include workers whose last heartbeat was at or before this time
+ */
+ before_?: string | null;
+ /**
+ * After
+ * @description Only include workers whose last heartbeat was at or after this time
+ */
+ after_?: string | null;
+ };
+ /**
+ * WorkerFilterStatus
+ * @description Filter by `Worker.status`.
+ */
+ WorkerFilterStatus: {
+ /**
+ * Any
+ * @description A list of worker statuses to include
+ */
+ any_?: components["schemas"]["WorkerStatus"][] | null;
+ /**
+ * Not Any
+ * @description A list of worker statuses to exclude
+ */
+ not_any_?: components["schemas"]["WorkerStatus"][] | null;
+ };
+ /** WorkerFlowRunResponse */
+ WorkerFlowRunResponse: {
+ /**
+ * Work Pool Id
+ * Format: uuid
+ */
+ work_pool_id: string;
+ /**
+ * Work Queue Id
+ * Format: uuid
+ */
+ work_queue_id: string;
+ flow_run: components["schemas"]["FlowRun"];
+ };
+ /** WorkerResponse */
+ WorkerResponse: {
+ /**
+ * Id
+ * Format: uuid
+ */
+ id?: string;
+ /** Created */
+ created?: string | null;
+ /** Updated */
+ updated?: string | null;
+ /**
+ * Name
+ * @description The name of the worker.
+ */
+ name: string;
+ /**
+ * Work Pool Id
+ * Format: uuid
+ * @description The work pool with which the worker is associated.
+ */
+ work_pool_id: string;
+ /**
+ * Last Heartbeat Time
+ * Format: date-time
+ * @description The last time the worker process sent a heartbeat.
+ */
+ last_heartbeat_time?: string;
+ /**
+ * Heartbeat Interval Seconds
+ * @description The number of seconds to expect between heartbeats sent by the worker.
+ */
+ heartbeat_interval_seconds?: number | null;
+ /**
+ * @description Current status of the worker.
+ * @default OFFLINE
+ */
+ status: components["schemas"]["WorkerStatus"];
+ };
+ /**
+ * WorkerStatus
+ * @description Enumeration of worker statuses.
+ * @enum {string}
+ */
+ WorkerStatus: "ONLINE" | "OFFLINE";
+ };
+ responses: never;
+ parameters: never;
+ requestBodies: never;
+ headers: never;
+ pathItems: never;
+}
+export type $defs = Record<string, never>;
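+ // Editor's sketch (not generated): downstream code usually aliases the long
+ // indexed-access types once instead of repeating them, for example:
+ //   type WorkQueue = components["schemas"]["WorkQueueResponse"];
+ //   type WorkerStatus = components["schemas"]["WorkerStatus"]; // "ONLINE" | "OFFLINE"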
+export interface operations {
+ health_check_health_get: {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": unknown;
+ };
+ };
+ };
+ };
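+ // Editor's sketch, assuming a local Prefect server on the default API URL:
+ // the health endpoint takes no parameters or body, so a bare fetch suffices.
+ //   const res = await fetch("http://127.0.0.1:4200/api/health");
+ //   const body: unknown = await res.json(); // the 200 body is typed `unknown` above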
+ server_version_version_get: {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": unknown;
+ };
+ };
+ };
+ };
+ create_flow_flows__post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["FlowCreate"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["Flow"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_flow_flows__id__get: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The flow id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["Flow"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ delete_flow_flows__id__delete: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The flow id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ update_flow_flows__id__patch: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The flow id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["FlowUpdate"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ count_flows_flows_count_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_count_flows_flows_count_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": number;
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_flow_by_name_flows_name__name__get: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The name of the flow */
+ name: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["Flow"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_flows_flows_filter_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_read_flows_flows_filter_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["Flow"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
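+ /*
+  * Editor's sketch of calling this operation with the third-party
+  * `openapi-fetch` package (an assumption; any client keyed on the `paths`
+  * interface works similarly). `data` is inferred from the 200 branch above
+  * as components["schemas"]["Flow"][].
+  *
+  *   import createClient from "openapi-fetch";
+  *   import type { paths } from "./api";
+  *
+  *   const client = createClient<paths>({ baseUrl: "http://127.0.0.1:4200/api" });
+  *   const { data, error } = await client.POST("/flows/filter", {
+  *     body: { limit: 10, sort: "NAME_ASC" },
+  *   });
+  */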
+ paginate_flows_flows_paginate_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_paginate_flows_flows_paginate_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["FlowPaginationResponse"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ create_flow_run_flow_runs__post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["FlowRunCreate"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["FlowRunResponse"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_flow_run_flow_runs__id__get: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The flow run id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["FlowRunResponse"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ delete_flow_run_flow_runs__id__delete: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The flow run id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ update_flow_run_flow_runs__id__patch: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The flow run id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["FlowRunUpdate"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ count_flow_runs_flow_runs_count_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_count_flow_runs_flow_runs_count_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": number;
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ average_flow_run_lateness_flow_runs_lateness_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_average_flow_run_lateness_flow_runs_lateness_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": number | null;
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ flow_run_history_flow_runs_history_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["Body_flow_run_history_flow_runs_history_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HistoryResponse"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_flow_run_graph_v1_flow_runs__id__graph_get: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The flow run id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["DependencyResult"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_flow_run_graph_v2_flow_runs__id__graph_v2_get: {
+ parameters: {
+ query?: {
+ /** @description Only include runs that start or end after this time. */
+ since?: string;
+ };
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The flow run id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["Graph"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ resume_flow_run_flow_runs__id__resume_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The flow run id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_resume_flow_run_flow_runs__id__resume_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["OrchestrationResult"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_flow_runs_flow_runs_filter_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_read_flow_runs_flow_runs_filter_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["FlowRunResponse"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ set_flow_run_state_flow_runs__id__set_state_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The flow run id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["Body_set_flow_run_state_flow_runs__id__set_state_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["OrchestrationResult"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
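+ // Editor's sketch (not generated): payload types can be pulled straight out
+ // of this interface; the alias names below are hypothetical.
+ //   type SetFlowRunStateBody =
+ //     operations["set_flow_run_state_flow_runs__id__set_state_post"]["requestBody"]["content"]["application/json"];
+ //   type SetFlowRunStateResult =
+ //     operations["set_flow_run_state_flow_runs__id__set_state_post"]["responses"][200]["content"]["application/json"];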
+ create_flow_run_input_flow_runs__id__input_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The flow run id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["Body_create_flow_run_input_flow_runs__id__input_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 201: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": unknown;
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ filter_flow_run_input_flow_runs__id__input_filter_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The flow run id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["Body_filter_flow_run_input_flow_runs__id__input_filter_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["FlowRunInput"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_flow_run_input_flow_runs__id__input__key__get: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The flow run id */
+ id: string;
+ /** @description The input key */
+ key: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": unknown;
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ delete_flow_run_input_flow_runs__id__input__key__delete: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The flow run id */
+ id: string;
+ /** @description The input key */
+ key: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ paginate_flow_runs_flow_runs_paginate_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_paginate_flow_runs_flow_runs_paginate_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["FlowRunPaginationResponse"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ download_logs_flow_runs__id__logs_download_get: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The flow run id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": unknown;
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ create_task_run_task_runs__post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["TaskRunCreate"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["TaskRun"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_task_run_task_runs__id__get: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The task run id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["TaskRun"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ delete_task_run_task_runs__id__delete: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The task run id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ update_task_run_task_runs__id__patch: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The task run id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["TaskRunUpdate"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ count_task_runs_task_runs_count_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_count_task_runs_task_runs_count_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": number;
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ task_run_history_task_runs_history_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["Body_task_run_history_task_runs_history_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HistoryResponse"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_task_runs_task_runs_filter_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_read_task_runs_task_runs_filter_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["TaskRun"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ set_task_run_state_task_runs__id__set_state_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The task run id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["Body_set_task_run_state_task_runs__id__set_state_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["OrchestrationResult"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_flow_run_state_flow_run_states__id__get: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The flow run state id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["State"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_flow_run_states_flow_run_states__get: {
+ parameters: {
+ query: {
+ flow_run_id: string;
+ };
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["State"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_task_run_state_task_run_states__id__get: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The task run state id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["State"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_task_run_states_task_run_states__get: {
+ parameters: {
+ query: {
+ task_run_id: string;
+ };
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["State"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ create_flow_run_notification_policy_flow_run_notification_policies__post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["FlowRunNotificationPolicyCreate"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 201: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["FlowRunNotificationPolicy"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_flow_run_notification_policy_flow_run_notification_policies__id__get: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The flow run notification policy id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["FlowRunNotificationPolicy"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ delete_flow_run_notification_policy_flow_run_notification_policies__id__delete: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The flow run notification policy id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ update_flow_run_notification_policy_flow_run_notification_policies__id__patch: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The flow run notification policy id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["FlowRunNotificationPolicyUpdate"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_flow_run_notification_policies_flow_run_notification_policies_filter_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_read_flow_run_notification_policies_flow_run_notification_policies_filter_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["FlowRunNotificationPolicy"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ create_deployment_deployments__post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["DeploymentCreate"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["DeploymentResponse"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_deployment_deployments__id__get: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The deployment id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["DeploymentResponse"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ delete_deployment_deployments__id__delete: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The deployment id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ update_deployment_deployments__id__patch: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The deployment id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["DeploymentUpdate"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_deployment_by_name_deployments_name__flow_name___deployment_name__get: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The name of the flow */
+ flow_name: string;
+ /** @description The name of the deployment */
+ deployment_name: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["DeploymentResponse"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_deployments_deployments_filter_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_read_deployments_deployments_filter_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["DeploymentResponse"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ paginate_deployments_deployments_paginate_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_paginate_deployments_deployments_paginate_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["DeploymentPaginationResponse"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ get_scheduled_flow_runs_for_deployments_deployments_get_scheduled_flow_runs_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["Body_get_scheduled_flow_runs_for_deployments_deployments_get_scheduled_flow_runs_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["FlowRunResponse"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ count_deployments_deployments_count_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_count_deployments_deployments_count_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": number;
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ schedule_deployment_deployments__id__schedule_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The deployment id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_schedule_deployment_deployments__id__schedule_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": unknown;
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ resume_deployment_deployments__id__resume_deployment_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The deployment id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": unknown;
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ pause_deployment_deployments__id__pause_deployment_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The deployment id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": unknown;
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ create_flow_run_from_deployment_deployments__id__create_flow_run_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The deployment id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["DeploymentFlowRunCreate"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["FlowRunResponse"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ work_queue_check_for_deployment_deployments__id__work_queue_check_get: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The deployment id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["WorkQueue"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_deployment_schedules_deployments__id__schedules_get: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The deployment id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["DeploymentSchedule"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ create_deployment_schedules_deployments__id__schedules_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The deployment id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["DeploymentScheduleCreate"][];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 201: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["DeploymentSchedule"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ delete_deployment_schedule_deployments__id__schedules__schedule_id__delete: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The deployment id */
+ id: string;
+ /** @description The schedule id */
+ schedule_id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ update_deployment_schedule_deployments__id__schedules__schedule_id__patch: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The deployment id */
+ id: string;
+ /** @description The schedule id */
+ schedule_id: string;
+ };
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["DeploymentScheduleUpdate"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
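+ /*
+  * Editor's sketch (again assuming `openapi-fetch`): operations with path
+  * parameters, like the schedule update above, take them under `params.path`;
+  * the `deploymentId` and `scheduleId` variables are placeholders.
+  *
+  *   await client.PATCH("/deployments/{id}/schedules/{schedule_id}", {
+  *     params: { path: { id: deploymentId, schedule_id: scheduleId } },
+  *     body: { active: false },
+  *   });
+  */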
+ create_saved_search_saved_searches__put: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["SavedSearchCreate"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["SavedSearch"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_saved_search_saved_searches__id__get: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The saved search id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["SavedSearch"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ delete_saved_search_saved_searches__id__delete: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The saved search id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_saved_searches_saved_searches_filter_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_read_saved_searches_saved_searches_filter_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["SavedSearch"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ create_logs_logs__post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["LogCreate"][];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 201: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": unknown;
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_logs_logs_filter_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_read_logs_logs_filter_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["Log"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ create_concurrency_limit_concurrency_limits__post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["ConcurrencyLimitCreate"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["ConcurrencyLimit"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_concurrency_limit_concurrency_limits__id__get: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The concurrency limit id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["ConcurrencyLimit"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ delete_concurrency_limit_concurrency_limits__id__delete: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The concurrency limit id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": unknown;
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_concurrency_limit_by_tag_concurrency_limits_tag__tag__get: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The tag name */
+ tag: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["ConcurrencyLimit"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ delete_concurrency_limit_by_tag_concurrency_limits_tag__tag__delete: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The tag name */
+ tag: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": unknown;
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_concurrency_limits_concurrency_limits_filter_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_read_concurrency_limits_concurrency_limits_filter_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["ConcurrencyLimit"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ reset_concurrency_limit_by_tag_concurrency_limits_tag__tag__reset_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The tag name */
+ tag: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_reset_concurrency_limit_by_tag_concurrency_limits_tag__tag__reset_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": unknown;
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ increment_concurrency_limits_v1_concurrency_limits_increment_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["Body_increment_concurrency_limits_v1_concurrency_limits_increment_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["MinimalConcurrencyLimitResponse"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ decrement_concurrency_limits_v1_concurrency_limits_decrement_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["Body_decrement_concurrency_limits_v1_concurrency_limits_decrement_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": unknown;
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ create_concurrency_limit_v2_v2_concurrency_limits__post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["ConcurrencyLimitV2Create"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 201: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["ConcurrencyLimitV2"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_concurrency_limit_v2_v2_concurrency_limits__id_or_name__get: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The ID or name of the concurrency limit */
+ id_or_name: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["GlobalConcurrencyLimitResponse"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ delete_concurrency_limit_v2_v2_concurrency_limits__id_or_name__delete: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The ID or name of the concurrency limit */
+ id_or_name: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ update_concurrency_limit_v2_v2_concurrency_limits__id_or_name__patch: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The ID or name of the concurrency limit */
+ id_or_name: string;
+ };
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["ConcurrencyLimitV2Update"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_all_concurrency_limits_v2_v2_concurrency_limits_filter_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_read_all_concurrency_limits_v2_v2_concurrency_limits_filter_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["GlobalConcurrencyLimitResponse"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ bulk_increment_active_slots_v2_concurrency_limits_increment_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["Body_bulk_increment_active_slots_v2_concurrency_limits_increment_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["MinimalConcurrencyLimitResponse"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ bulk_decrement_active_slots_v2_concurrency_limits_decrement_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["Body_bulk_decrement_active_slots_v2_concurrency_limits_decrement_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["MinimalConcurrencyLimitResponse"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
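+ /*
+  * Editor's note (illustrative): a typed client such as `openapi-fetch` can
+  * consume these definitions through the `paths` interface generated
+  * alongside `operations`. A minimal sketch, assuming this file is
+  * importable as "./api" and the API is mounted at /api; the body fields
+  * shown are placeholders rather than confirmed schema fields:
+  *
+  *   import createClient from "openapi-fetch";
+  *   import type { paths } from "./api";
+  *
+  *   const client = createClient<paths>({ baseUrl: "/api" });
+  *   // The 200/422 keys above become the success and error halves:
+  *   const { data, error } = await client.POST("/v2/concurrency_limits/increment", {
+  *     body: { names: ["my-limit"], slots: 1 },
+  *   });
+  */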
+ create_block_type_block_types__post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["BlockTypeCreate"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 201: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["BlockType"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_block_type_by_id_block_types__id__get: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The block type ID */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["BlockType"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ delete_block_type_block_types__id__delete: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The block type ID */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ update_block_type_block_types__id__patch: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The block type ID */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["BlockTypeUpdate"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_block_type_by_slug_block_types_slug__slug__get: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The block type name */
+ slug: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["BlockType"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_block_types_block_types_filter_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_read_block_types_block_types_filter_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["BlockType"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_block_documents_for_block_type_block_types_slug__slug__block_documents_get: {
+ parameters: {
+ query?: {
+ /** @description Whether to include sensitive values in the block document. */
+ include_secrets?: boolean;
+ };
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The block type name */
+ slug: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["BlockDocument"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_block_document_by_name_for_block_type_block_types_slug__slug__block_documents_name__block_document_name__get: {
+ parameters: {
+ query?: {
+ /** @description Whether to include sensitive values in the block document. */
+ include_secrets?: boolean;
+ };
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The block type name */
+ slug: string;
+ /** @description The block document name */
+ block_document_name: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["BlockDocument"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ install_system_block_types_block_types_install_system_block_types_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": unknown;
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ create_block_document_block_documents__post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["BlockDocumentCreate"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 201: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["BlockDocument"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_block_documents_block_documents_filter_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_read_block_documents_block_documents_filter_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["BlockDocument"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ count_block_documents_block_documents_count_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_count_block_documents_block_documents_count_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": number;
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_block_document_by_id_block_documents__id__get: {
+ parameters: {
+ query?: {
+ /** @description Whether to include sensitive values in the block document. */
+ include_secrets?: boolean;
+ };
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The block document id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["BlockDocument"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ delete_block_document_block_documents__id__delete: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The block document id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ update_block_document_data_block_documents__id__patch: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The block document id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["BlockDocumentUpdate"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
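+ /*
+  * Editor's note (illustrative): the block-document operations above encode
+  * their success codes in the type: creation returns 201 with a
+  * BlockDocument body, while update and delete return a bodiless 204
+  * (`content?: never`). Derived type sketch:
+  *
+  *   type CreatedBlockDocument =
+  *     operations["create_block_document_block_documents__post"]["responses"][201]["content"]["application/json"];
+  */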
+ create_work_pool_work_pools__post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["WorkPoolCreate"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 201: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["WorkPool"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_work_pool_work_pools__name__get: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The work pool name */
+ name: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["WorkPool"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ delete_work_pool_work_pools__name__delete: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The work pool name */
+ name: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ update_work_pool_work_pools__name__patch: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The work pool name */
+ name: string;
+ };
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["WorkPoolUpdate"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_work_pools_work_pools_filter_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_read_work_pools_work_pools_filter_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["WorkPool"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ count_work_pools_work_pools_count_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_count_work_pools_work_pools_count_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": number;
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ get_scheduled_flow_runs_work_pools__name__get_scheduled_flow_runs_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The work pool name */
+ name: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_get_scheduled_flow_runs_work_pools__name__get_scheduled_flow_runs_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["WorkerFlowRunResponse"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ create_work_queue_work_pools__work_pool_name__queues_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The work pool name */
+ work_pool_name: string;
+ };
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["WorkQueueCreate"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 201: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["WorkQueueResponse"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_work_queue_work_pools__work_pool_name__queues__name__get: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The work pool name */
+ work_pool_name: string;
+ /** @description The work pool queue name */
+ name: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["WorkQueueResponse"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ delete_work_queue_work_pools__work_pool_name__queues__name__delete: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The work pool name */
+ work_pool_name: string;
+ /** @description The work pool queue name */
+ name: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ update_work_queue_work_pools__work_pool_name__queues__name__patch: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The work pool name */
+ work_pool_name: string;
+ /** @description The work pool queue name */
+ name: string;
+ };
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["WorkQueueUpdate"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_work_queues_work_pools__work_pool_name__queues_filter_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The work pool name */
+ work_pool_name: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_read_work_queues_work_pools__work_pool_name__queues_filter_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["WorkQueueResponse"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
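+ /*
+  * Editor's note (illustrative): the `filter` endpoints, like the one above,
+  * declare `requestBody?`, so a typed caller may omit the body entirely to
+  * request unfiltered results. Sketch of unwrapping the optional body type:
+  *
+  *   type QueueFilterBody = NonNullable<
+  *     operations["read_work_queues_work_pools__work_pool_name__queues_filter_post"]["requestBody"]
+  *   >["content"]["application/json"];
+  */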
+ worker_heartbeat_work_pools__work_pool_name__workers_heartbeat_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The work pool name */
+ work_pool_name: string;
+ };
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["Body_worker_heartbeat_work_pools__work_pool_name__workers_heartbeat_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_workers_work_pools__work_pool_name__workers_filter_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The work pool name */
+ work_pool_name: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_read_workers_work_pools__work_pool_name__workers_filter_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["WorkerResponse"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ delete_worker_work_pools__work_pool_name__workers__name__delete: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The work pool name */
+ work_pool_name: string;
+ /** @description The work pool's worker name */
+ name: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_task_workers_task_workers_filter_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_read_task_workers_task_workers_filter_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["TaskWorkerResponse"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ create_work_queue_work_queues__post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["WorkQueueCreate"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 201: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["WorkQueueResponse"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_work_queue_work_queues__id__get: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The work queue id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["WorkQueueResponse"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ delete_work_queue_work_queues__id__delete: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The work queue id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ update_work_queue_work_queues__id__patch: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The work queue id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["WorkQueueUpdate"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_work_queue_by_name_work_queues_name__name__get: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The work queue name */
+ name: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["WorkQueueResponse"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_work_queue_runs_work_queues__id__get_runs_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ /** @description A header to indicate this request came from the Prefect UI. */
+ "x-prefect-ui"?: boolean | null;
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The work queue id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_read_work_queue_runs_work_queues__id__get_runs_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["FlowRunResponse"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_work_queues_work_queues_filter_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_read_work_queues_work_queues_filter_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["WorkQueueResponse"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_work_queue_status_work_queues__id__status_get: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The work queue id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["WorkQueueStatusDetail"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ create_artifact_artifacts__post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["ArtifactCreate"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["Artifact"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
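+ /*
+  * Editor's note: unlike the other create operations in this section, which
+  * type their success response as 201, `create_artifact_artifacts__post`
+  * above uses 200, so status-keyed callers should branch accordingly:
+  *
+  *   type CreatedArtifact =
+  *     operations["create_artifact_artifacts__post"]["responses"][200]["content"]["application/json"];
+  */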
+ read_artifact_artifacts__id__get: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The ID of the artifact to retrieve. */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["Artifact"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ delete_artifact_artifacts__id__delete: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The ID of the artifact to delete. */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ update_artifact_artifacts__id__patch: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The ID of the artifact to update. */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["ArtifactUpdate"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_latest_artifact_artifacts__key__latest_get: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The key of the artifact to retrieve. */
+ key: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["Artifact"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_artifacts_artifacts_filter_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_read_artifacts_artifacts_filter_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["Artifact"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_latest_artifacts_artifacts_latest_filter_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_read_latest_artifacts_artifacts_latest_filter_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["ArtifactCollection"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ count_artifacts_artifacts_count_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_count_artifacts_artifacts_count_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": number;
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ count_latest_artifacts_artifacts_latest_count_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_count_latest_artifacts_artifacts_latest_count_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": number;
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ create_block_schema_block_schemas__post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["BlockSchemaCreate"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 201: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["BlockSchema"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_block_schema_by_id_block_schemas__id__get: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The block schema id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["BlockSchema"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ delete_block_schema_block_schemas__id__delete: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The block schema id */
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_block_schemas_block_schemas_filter_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_read_block_schemas_block_schemas_filter_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["BlockSchema"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_block_schema_by_checksum_block_schemas_checksum__checksum__get: {
+ parameters: {
+ query?: {
+ /** @description Version of block schema. If not provided, the most recently created block schema with the matching checksum will be returned. */
+ version?: string | null;
+ };
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ /** @description The block schema checksum */
+ checksum: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["BlockSchema"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_available_block_capabilities_block_capabilities__get: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": string[];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_view_content_collections_views__view__get: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ view: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": Record;
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ create_variable_variables__post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["VariableCreate"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 201: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["Variable"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_variable_variables__id__get: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["Variable"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ delete_variable_variables__id__delete: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ update_variable_variables__id__patch: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["VariableUpdate"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_variable_by_name_variables_name__name__get: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ name: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["Variable"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ delete_variable_by_name_variables_name__name__delete: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ name: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ update_variable_by_name_variables_name__name__patch: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ name: string;
+ };
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["VariableUpdate"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_variables_variables_filter_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_read_variables_variables_filter_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["Variable"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ count_variables_variables_count_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_count_variables_variables_count_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": number;
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ create_csrf_token_csrf_token_get: {
+ parameters: {
+ query: {
+ /** @description The client to create a CSRF token for */
+ client: string;
+ };
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["CsrfToken"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
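+ /*
+  * Editor's note (illustrative): the operation above takes a required
+  * `client` query parameter and returns a CsrfToken schema. A hedged
+  * openapi-fetch sketch, assuming the route is /csrf-token and reusing the
+  * client from the earlier sketch; the client id is a placeholder:
+  *
+  *   const { data: token } = await client.GET("/csrf-token", {
+  *     params: { query: { client: "my-client-id" } },
+  *   });
+  */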
+ create_events_events_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["Event"][];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_events_events_filter_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_read_events_events_filter_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["EventPage"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_account_events_page_events_filter_next_get: {
+ parameters: {
+ query: {
+ "page-token": string;
+ };
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["EventPage"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
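+ /*
+  * Editor's note (illustrative): event reads are paginated; the filter POST
+  * above returns the first EventPage and the `page-token` GET returns
+  * subsequent pages of the same schema. Derived type sketch:
+  *
+  *   type FirstEventsPage =
+  *     operations["read_events_events_filter_post"]["responses"][200]["content"]["application/json"];
+  *   // The next-page operation requires query: { "page-token": string }.
+  */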
+ count_account_events_events_count_by__countable__post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ countable: components["schemas"]["Countable"];
+ };
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["Body_count_account_events_events_count_by__countable__post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["EventCount"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ create_automation_automations__post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["AutomationCreate"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 201: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["Automation"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_automation_automations__id__get: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["Automation"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ update_automation_automations__id__put: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["AutomationUpdate"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ delete_automation_automations__id__delete: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ patch_automation_automations__id__patch: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ id: string;
+ };
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["AutomationPartialUpdate"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_automations_automations_filter_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_read_automations_automations_filter_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["Automation"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ count_automations_automations_count_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": number;
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_automations_related_to_resource_automations_related_to__resource_id__get: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ resource_id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["Automation"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ delete_automations_owned_by_resource_automations_owned_by__resource_id__delete: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path: {
+ resource_id: string;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 202: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": unknown;
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ validate_template_templates_validate_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": string;
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ count_deployments_by_flow_ui_flows_count_deployments_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["Body_count_deployments_by_flow_ui_flows_count_deployments_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": {
+ [key: string]: number;
+ };
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ next_runs_by_flow_ui_flows_next_runs_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["Body_next_runs_by_flow_ui_flows_next_runs_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": {
+ [key: string]: components["schemas"]["SimpleNextFlowRun"] | null;
+ };
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_flow_run_history_ui_flow_runs_history_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_read_flow_run_history_ui_flow_runs_history_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["SimpleFlowRun"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ count_task_runs_by_flow_run_ui_flow_runs_count_task_runs_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["Body_count_task_runs_by_flow_run_ui_flow_runs_count_task_runs_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": {
+ [key: string]: number;
+ };
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ validate_obj_ui_schemas_validate_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["Body_validate_obj_ui_schemas_validate_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": unknown;
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_dashboard_task_run_counts_ui_task_runs_dashboard_counts_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["Body_read_dashboard_task_run_counts_ui_task_runs_dashboard_counts_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["TaskRunCount"][];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_task_run_counts_by_state_ui_task_runs_count_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_read_task_run_counts_by_state_ui_task_runs_count_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["CountByState"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_settings_admin_settings_get: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["Settings"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ read_version_admin_version_get: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": string;
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ clear_database_admin_database_clear_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_clear_database_admin_database_clear_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ drop_database_admin_database_drop_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_drop_database_admin_database_drop_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ create_database_admin_database_create_post: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: {
+ content: {
+ "application/json": components["schemas"]["Body_create_database_admin_database_create_post"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 204: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content?: never;
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ hello_hello_get: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": unknown;
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+ perform_readiness_check_ready_get: {
+ parameters: {
+ query?: never;
+ header?: {
+ "x-prefect-api-version"?: string;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": unknown;
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
+}
diff --git a/ui-v2/src/api/service.ts b/ui-v2/src/api/service.ts
new file mode 100644
index 000000000000..77fd199e0bac
--- /dev/null
+++ b/ui-v2/src/api/service.ts
@@ -0,0 +1,25 @@
+import createClient, { type Middleware } from "openapi-fetch";
+import type { paths } from "./prefect.ts"; // generated by openapi-typescript
+
+const throwOnError: Middleware = {
+ async onResponse({ response }) {
+ if (!response.ok) {
+      const body = (await response.clone().json()) as Record<string, unknown>;
+ throw new Error(body.detail as string | undefined);
+ }
+ },
+};
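+
+// openapi-fetch invokes `onResponse` middleware for every completed request;
+// throwing here surfaces the server's `detail` field (FastAPI's error shape)
+// as the react-query error for the calling hook.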
+
+let client: ReturnType<typeof createClient<paths>> | null = null;
+
+// TODO: Make the baseUrl configurable
+export const getQueryService = () => {
+ if (!client) {
+    client = createClient<paths>({
+ baseUrl:
+ (import.meta.env.VITE_API_URL as string) ?? "http://localhost:4200/api",
+ });
+ client.use(throwOnError);
+ }
+ return client;
+};
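+
+// Usage sketch (illustrative id) — any path from the generated `paths` map works:
+//   const { data, error } = await getQueryService().GET("/flows/{id}", {
+//     params: { path: { id: "00000000-0000-0000-0000-000000000000" } },
+//   });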
diff --git a/ui-v2/src/app.tsx b/ui-v2/src/app.tsx
new file mode 100644
index 000000000000..26375d1ee1f9
--- /dev/null
+++ b/ui-v2/src/app.tsx
@@ -0,0 +1,11 @@
+import { RouterProvider } from "@tanstack/react-router";
+import { queryClient, router } from "./router";
+import { QueryClientProvider } from "@tanstack/react-query";
+
+export const App = () => {
+  return (
+    <QueryClientProvider client={queryClient}>
+      <RouterProvider router={router} />
+    </QueryClientProvider>
+  );
+};
diff --git a/ui-v2/src/components/flows/cells.tsx b/ui-v2/src/components/flows/cells.tsx
new file mode 100644
index 000000000000..451a13172d6b
--- /dev/null
+++ b/ui-v2/src/components/flows/cells.tsx
@@ -0,0 +1,134 @@
+import { components } from "@/api/prefect";
+import { Button } from "@/components/ui/button";
+import {
+ DropdownMenu,
+ DropdownMenuContent,
+ DropdownMenuItem,
+ DropdownMenuLabel,
+ DropdownMenuSeparator,
+ DropdownMenuTrigger,
+} from "@/components/ui/dropdown-menu";
+import { cn } from "@/lib/utils";
+import { useQuery } from "@tanstack/react-query";
+import { Link } from "@tanstack/react-router";
+import { format, parseISO } from "date-fns";
+import { MoreVerticalIcon } from "lucide-react";
+import {
+ deploymentsCountQueryParams,
+ getLatestFlowRunsQueryParams,
+ getNextFlowRunsQueryParams,
+} from "./queries";
+
+type Flow = components["schemas"]["Flow"];
+
+export const FlowName = ({ row }: { row: { original: Flow } }) => {
+ if (!row.original.id) return null;
+
+  return (
+    <div>
+      <Link to="/flows/flow/$id" params={{ id: row.original.id }}>
+        {row.original.name}
+      </Link>
+      <div className="text-sm text-muted-foreground">
+        Created{" "}
+        {row.original?.created &&
+          format(parseISO(row.original.created), "yyyy-MM-dd")}
+      </div>
+    </div>
+  );
+};
+
+export const FlowLastRun = ({ row }: { row: { original: Flow } }) => {
+ const { data } = useQuery(
+ getLatestFlowRunsQueryParams(row.original.id || "", 16, {
+ enabled: !!row.original.id,
+ }),
+ );
+
+ if (!row.original.id) return null;
+ return JSON.stringify(data?.[0]?.name);
+};
+
+export const FlowNextRun = ({ row }: { row: { original: Flow } }) => {
+ const { data } = useQuery(
+ getNextFlowRunsQueryParams(row.original.id || "", 16, {
+ enabled: !!row.original.id,
+ }),
+ );
+
+ if (!row.original.id) return null;
+ return JSON.stringify(data?.[0]?.name);
+};
+
+export const FlowDeploymentCount = ({ row }: { row: { original: Flow } }) => {
+ const { data } = useQuery(
+ deploymentsCountQueryParams(row.original.id || "", {
+ enabled: !!row.original.id,
+ }),
+ );
+ if (!row.original.id) return null;
+
+ return data;
+};
+
+export const FlowActionMenu = ({ row }: { row: { original: Flow } }) => {
+ const id = row.original.id;
+ if (!id) {
+ return null;
+ }
+  return (
+    <DropdownMenu>
+      <DropdownMenuTrigger asChild>
+        <Button variant="ghost" className="h-8 w-8 p-0">
+          <span className="sr-only">Open menu</span>
+          <MoreVerticalIcon className="h-4 w-4" />
+        </Button>
+      </DropdownMenuTrigger>
+      <DropdownMenuContent align="end">
+        <DropdownMenuLabel>Actions</DropdownMenuLabel>
+        <DropdownMenuItem
+          onClick={() => void navigator.clipboard.writeText(id)}
+        >
+          Copy ID
+        </DropdownMenuItem>
+        <DropdownMenuSeparator />
+        <DropdownMenuItem>Delete</DropdownMenuItem>
+        <DropdownMenuItem>Automate</DropdownMenuItem>
+      </DropdownMenuContent>
+    </DropdownMenu>
+  );
+};
+
+export const FlowActivity = ({ row }: { row: { original: Flow } }) => {
+ const { data } = useQuery(
+ getLatestFlowRunsQueryParams(row.original.id || "", 16, {
+ enabled: !!row.original.id,
+ }),
+ );
+ if (!row.original.id) return null;
+
+  return (
+    <div className="flex items-center gap-0.5">
+      {Array(16)
+        .fill(1)
+        ?.map((_, index) => (
+          <div
+            key={index}
+            className={cn(
+              "h-6 w-1 rounded-full",
+              data?.[index] ? "bg-green-500" : "bg-muted",
+            )}
+          />
+        ))}
+    </div>
+  );
+};
diff --git a/ui-v2/src/components/flows/columns.tsx b/ui-v2/src/components/flows/columns.tsx
new file mode 100644
index 000000000000..7ac5ed14fb57
--- /dev/null
+++ b/ui-v2/src/components/flows/columns.tsx
@@ -0,0 +1,64 @@
+import { components } from "@/api/prefect";
+import { Checkbox } from "@/components/ui/checkbox";
+import { ColumnDef } from "@tanstack/react-table";
+import {
+ FlowActionMenu,
+ FlowActivity,
+ FlowDeploymentCount,
+ FlowLastRun,
+ FlowName,
+ FlowNextRun,
+} from "./cells";
+
+type Flow = components["schemas"]["Flow"];
+
+export const columns: ColumnDef<Flow>[] = [
+ {
+ id: "select",
+    header: ({ table }) => (
+      <Checkbox
+        checked={
+          table.getIsAllPageRowsSelected() ||
+          (table.getIsSomePageRowsSelected() && "indeterminate")
+        }
+        onCheckedChange={(value) => table.toggleAllPageRowsSelected(!!value)}
+        aria-label="Select all"
+      />
+    ),
+    cell: ({ row }) => (
+      <Checkbox
+        checked={row.getIsSelected()}
+        onCheckedChange={(value) => row.toggleSelected(!!value)}
+        aria-label="Select row"
+      />
+    ),
+ enableSorting: false,
+ enableHiding: false,
+ },
+ {
+ accessorKey: "name",
+ header: "Name",
+ cell: FlowName,
+ },
+ {
+ accessorKey: "lastRuns",
+ header: "Last Run",
+ cell: FlowLastRun,
+ },
+ {
+ accessorKey: "nextRuns",
+ header: "Next Run",
+ cell: FlowNextRun,
+ },
+ {
+ accessorKey: "deployments",
+ header: "Deployments",
+ cell: FlowDeploymentCount,
+ },
+ {
+ accessorKey: "activity",
+ header: "Activity",
+ cell: FlowActivity,
+ },
+ {
+ id: "actions",
+ cell: FlowActionMenu,
+ },
+];
diff --git a/ui-v2/src/components/flows/data-table.tsx b/ui-v2/src/components/flows/data-table.tsx
new file mode 100644
index 000000000000..0613b7796d29
--- /dev/null
+++ b/ui-v2/src/components/flows/data-table.tsx
@@ -0,0 +1,181 @@
+import type { components } from "@/api/prefect";
+import { Button } from "@/components/ui/button";
+import { DataTable } from "@/components/ui/data-table";
+import {
+ DropdownMenu,
+ DropdownMenuContent,
+ DropdownMenuItem,
+ DropdownMenuTrigger,
+} from "@/components/ui/dropdown-menu";
+import { Input } from "@/components/ui/input";
+import { useNavigate } from "@tanstack/react-router";
+import {
+ getCoreRowModel,
+ getPaginationRowModel,
+ useReactTable,
+} from "@tanstack/react-table";
+import { ChevronDownIcon, SearchIcon } from "lucide-react";
+import { useState } from "react";
+import { columns } from "./columns";
+
+const SearchComponent = () => {
+ const navigate = useNavigate();
+
+  return (
+    <div className="flex items-center gap-2">
+      <SearchIcon className="h-4 w-4" />
+      <Input
+        placeholder="Search flows"
+        onChange={(e) =>
+          void navigate({
+            to: ".",
+            search: (prev) => ({ ...prev, name: e.target.value }),
+          })
+        }
+      />
+    </div>
+  );
+};
+const FilterComponent = () => {
+  const [selectedTags, setSelectedTags] = useState<string[]>([]);
+ const [open, setOpen] = useState(false);
+
+ const toggleTag = (tag: string) => {
+ setSelectedTags((prev) =>
+ prev.includes(tag) ? prev.filter((t) => t !== tag) : [...prev, tag],
+ );
+ };
+
+ const renderSelectedTags = () => {
+ if (selectedTags.length === 0) return "All tags";
+ if (selectedTags.length === 1) return selectedTags[0];
+ return `${selectedTags[0]}, ${selectedTags[1]}${selectedTags.length > 2 ? "..." : ""}`;
+ };
+
+  return (
+    <DropdownMenu open={open} onOpenChange={setOpen}>
+      <DropdownMenuTrigger asChild>
+        <Button variant="outline">
+          {renderSelectedTags()}
+          <ChevronDownIcon className="ml-2 h-4 w-4" />
+        </Button>
+      </DropdownMenuTrigger>
+      <DropdownMenuContent>
+        <DropdownMenuItem
+          onSelect={(e) => {
+            e.preventDefault();
+            toggleTag("Tag 1");
+          }}
+        >
+          <span className="mr-2">
+            {selectedTags.includes("Tag 1") ? "✓" : ""}
+          </span>
+          Tag 1
+        </DropdownMenuItem>
+        <DropdownMenuItem
+          onSelect={(e) => {
+            e.preventDefault();
+            toggleTag("Tag 2");
+          }}
+        >
+          <span className="mr-2">
+            {selectedTags.includes("Tag 2") ? "✓" : ""}
+          </span>
+          Tag 2
+        </DropdownMenuItem>
+        <DropdownMenuItem
+          onSelect={(e) => {
+            e.preventDefault();
+            toggleTag("Tag 3");
+          }}
+        >
+          <span className="mr-2">
+            {selectedTags.includes("Tag 3") ? "✓" : ""}
+          </span>
+          Tag 3
+        </DropdownMenuItem>
+      </DropdownMenuContent>
+    </DropdownMenu>
+  );
+};
+
+const SortComponent = () => {
+ const navigate = useNavigate();
+
+  return (
+    <DropdownMenu>
+      <DropdownMenuTrigger asChild>
+        <Button variant="outline">
+          Sort
+          <ChevronDownIcon className="ml-2 h-4 w-4" />
+        </Button>
+      </DropdownMenuTrigger>
+      <DropdownMenuContent>
+        <DropdownMenuItem
+          onClick={() =>
+            void navigate({
+              to: ".",
+              search: (prev) => ({ ...prev, sort: "NAME_ASC" }),
+            })
+          }
+        >
+          A to Z
+        </DropdownMenuItem>
+        <DropdownMenuItem
+          onClick={() =>
+            void navigate({
+              to: ".",
+              search: (prev) => ({ ...prev, sort: "NAME_DESC" }),
+            })
+          }
+        >
+          Z to A
+        </DropdownMenuItem>
+      </DropdownMenuContent>
+    </DropdownMenu>
+  );
+};
+
+export default function FlowsTable({
+ flows,
+}: {
+ flows: components["schemas"]["Flow"][];
+}) {
+ const table = useReactTable({
+ columns: columns,
+ data: flows,
+ getCoreRowModel: getCoreRowModel(),
+ getPaginationRowModel: getPaginationRowModel(),
+ initialState: {
+ pagination: {
+ pageIndex: 0,
+ pageSize: 10,
+ },
+ },
+ });
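+
+  // With getCoreRowModel + getPaginationRowModel the table paginates
+  // client-side over the `flows` prop (10 rows per page, per initialState).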
+
+  return (
+    <div className="flex flex-col gap-4">
+      <div className="flex items-center justify-between">
+        <SearchComponent />
+        <div className="flex items-center gap-2">
+          <FilterComponent />
+          <SortComponent />
+        </div>
+      </div>
+      <DataTable table={table} />
+    </div>
+  );
+}
diff --git a/ui-v2/src/components/flows/detail/cells.tsx b/ui-v2/src/components/flows/detail/cells.tsx
new file mode 100644
index 000000000000..6b7e5218e771
--- /dev/null
+++ b/ui-v2/src/components/flows/detail/cells.tsx
@@ -0,0 +1,32 @@
+import type { components } from "@/api/prefect";
+import { getQueryService } from "@/api/service";
+import { useQuery } from "@tanstack/react-query";
+
+type FlowRun = components["schemas"]["FlowRun"];
+
+export const DeploymentCell = ({ row }: { row: { original: FlowRun } }) => {
+ const deploymentId = row.original.deployment_id;
+ const { data: deployment } = useQuery({
+ queryKey: ["deployment", deploymentId],
+ queryFn: () =>
+ getQueryService().GET("/deployments/{id}", {
+ params: { path: { id: deploymentId as string } },
+ }),
+ enabled: !!deploymentId,
+ });
+ return deployment?.data?.name;
+};
+
+export const WorkPoolCell = ({ row }: { row: { original: FlowRun } }) => {
+ const deploymentId = row.original.deployment_id;
+ const { data: deployment } = useQuery({
+ queryKey: ["deployment", deploymentId],
+ queryFn: () =>
+ getQueryService().GET("/deployments/{id}", {
+ params: { path: { id: deploymentId as string } },
+ }),
+ enabled: !!deploymentId,
+ });
+
+ return deployment?.data?.work_pool_name;
+};
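+
+// Both cells use the queryKey ["deployment", deploymentId], so react-query
+// deduplicates the request and they render from one cached response.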
diff --git a/ui-v2/src/components/flows/detail/deployment-columns.tsx b/ui-v2/src/components/flows/detail/deployment-columns.tsx
new file mode 100644
index 000000000000..5966c1cba7f5
--- /dev/null
+++ b/ui-v2/src/components/flows/detail/deployment-columns.tsx
@@ -0,0 +1,124 @@
+import { components } from "@/api/prefect";
+import { Button } from "@/components/ui/button";
+import {
+ DropdownMenu,
+ DropdownMenuContent,
+ DropdownMenuItem,
+ DropdownMenuTrigger,
+} from "@/components/ui/dropdown-menu";
+import { ColumnDef } from "@tanstack/react-table";
+import { MoreHorizontal } from "lucide-react";
+
+type Deployment = components["schemas"]["DeploymentResponse"];
+
+export const columns: ColumnDef<Deployment>[] = [
+ {
+ accessorKey: "name",
+ header: "Name",
+ cell: ({ row }) => row.original.name,
+ },
+ {
+ accessorKey: "status",
+ header: "Status",
+ cell: ({ row }) => {
+ return row.original.status;
+ },
+ },
+ {
+ accessorKey: "tags",
+ header: "Tags",
+    cell: ({ row }) => (
+      <div className="flex flex-wrap gap-1">
+        {row.original.tags?.map((tag, index) => (
+          <span key={index}>
+            {tag}
+          </span>
+        ))}
+      </div>
+    ),
+ },
+ {
+ accessorKey: "schedules",
+ header: "Schedules",
+    cell: ({ row }) => (
+      <div className="flex flex-col gap-1">
+        {row.original.schedules?.map((schedule, index) => {
+          if (
+            schedule.schedule &&
+            typeof schedule.schedule === "object" &&
+            "cron" in schedule.schedule
+          ) {
+            const cronExpression = schedule.schedule.cron;
+            return (
+              <span key={index}>
+                Cron: {cronExpression}
+              </span>
+            );
+          } else if (
+            schedule.schedule &&
+            typeof schedule.schedule === "object" &&
+            "interval" in schedule.schedule
+          ) {
+            return (
+              <span key={index}>
+                Interval: {schedule.schedule.interval} seconds
+              </span>
+            );
+          } else if (
+            schedule.schedule &&
+            typeof schedule.schedule === "object" &&
+            "rrule" in schedule.schedule
+          ) {
+            return (
+              <span key={index}>
+                RRule: {schedule.schedule.rrule}
+              </span>
+            );
+          } else {
+            return (
+              <span key={index}>
+                {JSON.stringify(schedule.schedule)}
+              </span>
+            );
+          }
+        })}
+      </div>
+    ),
+ },
+ {
+ id: "actions",
+ cell: ({ row }) => {
+ if (!row.original.id) return null;
+
+ return (
+
+
+
+ Open menu
+
+
+
+
+ Quick run
+ Custom run
+
+ void navigator.clipboard.writeText(row.original.id as string)
+ }
+ >
+ Copy ID
+
+ Edit
+ Delete
+ Duplicate
+ Manage Access
+ Add to incident
+
+
+ );
+ },
+ },
+];
diff --git a/ui-v2/src/components/flows/detail/index.tsx b/ui-v2/src/components/flows/detail/index.tsx
new file mode 100644
index 000000000000..a5581850f2d7
--- /dev/null
+++ b/ui-v2/src/components/flows/detail/index.tsx
@@ -0,0 +1,186 @@
+import { components } from "@/api/prefect";
+import { DataTable } from "@/components/ui/data-table";
+import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs";
+import { useNavigate } from "@tanstack/react-router";
+import { columns as deploymentColumns } from "./deployment-columns";
+import {
+ getFlowMetadata,
+ columns as metadataColumns,
+} from "./metadata-columns";
+import { columns as flowRunColumns } from "./runs-columns";
+
+import { Button } from "@/components/ui/button";
+import {
+ DropdownMenu,
+ DropdownMenuContent,
+ DropdownMenuItem,
+ DropdownMenuTrigger,
+} from "@/components/ui/dropdown-menu";
+import { Input } from "@/components/ui/input";
+import {
+ getCoreRowModel,
+ getPaginationRowModel,
+ useReactTable,
+} from "@tanstack/react-table";
+import { ChevronDownIcon, SearchIcon } from "lucide-react";
+
+const SearchComponent = () => {
+ const navigate = useNavigate();
+
+  return (
+    <div className="flex items-center gap-2">
+      <SearchIcon className="h-4 w-4" />
+      <Input
+        placeholder="Search runs"
+        onChange={(e) =>
+          void navigate({
+            to: ".",
+            search: (prev) => ({
+              ...prev,
+              "runs.flowRuns.nameLike": e.target.value,
+            }),
+          })
+        }
+      />
+    </div>
+  );
+};
+
+const SortComponent = () => {
+ const navigate = useNavigate();
+
+  return (
+    <DropdownMenu>
+      <DropdownMenuTrigger asChild>
+        <Button variant="outline">
+          Sort
+          <ChevronDownIcon className="ml-2 h-4 w-4" />
+        </Button>
+      </DropdownMenuTrigger>
+      <DropdownMenuContent>
+        <DropdownMenuItem
+          onClick={() =>
+            void navigate({
+              to: ".",
+              search: (prev) => ({ ...prev, "runs.sort": "START_TIME_DESC" }),
+            })
+          }
+        >
+          Newest
+        </DropdownMenuItem>
+        <DropdownMenuItem
+          onClick={() =>
+            void navigate({
+              to: ".",
+              search: (prev) => ({ ...prev, "runs.sort": "START_TIME_ASC" }),
+            })
+          }
+        >
+          Oldest
+        </DropdownMenuItem>
+      </DropdownMenuContent>
+    </DropdownMenu>
+  );
+};
+
+export default function FlowDetail({
+ flow,
+ flowRuns,
+ activity,
+ deployments,
+ tab = "runs",
+}: {
+ flow: components["schemas"]["Flow"];
+ flowRuns: components["schemas"]["FlowRun"][];
+ activity: components["schemas"]["FlowRun"][];
+ deployments: components["schemas"]["DeploymentResponse"][];
+ tab: "runs" | "deployments" | "details";
+}): React.ReactElement {
+ const navigate = useNavigate();
+ console.log(activity);
+
+ const flowRunTable = useReactTable({
+ data: flowRuns,
+ columns: flowRunColumns,
+ getCoreRowModel: getCoreRowModel(),
+ getPaginationRowModel: getPaginationRowModel(),
+ initialState: {
+ pagination: {
+ pageIndex: 0,
+ pageSize: 10,
+ },
+ },
+ });
+
+ const deploymentsTable = useReactTable({
+ data: deployments,
+ columns: deploymentColumns,
+ getCoreRowModel: getCoreRowModel(),
+ getPaginationRowModel: getPaginationRowModel(),
+ initialState: {
+ pagination: {
+ pageIndex: 0,
+ pageSize: 10,
+ },
+ },
+ });
+
+ const metadataTable = useReactTable({
+ columns: metadataColumns,
+ data: getFlowMetadata(flow),
+ getCoreRowModel: getCoreRowModel(),
+ getPaginationRowModel: getPaginationRowModel(),
+ onPaginationChange: (pagination) => {
+ console.log(pagination);
+ return pagination;
+ },
+ initialState: {
+ pagination: {
+ pageIndex: 0,
+ pageSize: 10,
+ },
+ },
+ });
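+
+  // Three independent table instances (runs, deployments, metadata) feed the
+  // shared DataTable component, one per tab in the markup below.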
+
+  return (
+    <div className="flex flex-col gap-4">
+      <Tabs
+        value={tab}
+        onValueChange={(value) =>
+          void navigate({
+            to: ".",
+            search: (prev) => ({
+              ...prev,
+              tab: value as "runs" | "deployments" | "details",
+            }),
+          })
+        }
+      >
+        <TabsList>
+          <TabsTrigger value="runs">Runs</TabsTrigger>
+          <TabsTrigger value="deployments">Deployments</TabsTrigger>
+          <TabsTrigger value="details">Details</TabsTrigger>
+        </TabsList>
+        <TabsContent value="runs">
+          <div className="flex items-center justify-between">
+            <SearchComponent />
+            <SortComponent />
+          </div>
+          <DataTable table={flowRunTable} />
+        </TabsContent>
+        <TabsContent value="deployments">
+          <DataTable table={deploymentsTable} />
+        </TabsContent>
+        <TabsContent value="details">
+          <DataTable table={metadataTable} />
+        </TabsContent>
+      </Tabs>
+    </div>
+  );
+}
diff --git a/ui-v2/src/components/flows/detail/metadata-columns.tsx b/ui-v2/src/components/flows/detail/metadata-columns.tsx
new file mode 100644
index 000000000000..4474fad98917
--- /dev/null
+++ b/ui-v2/src/components/flows/detail/metadata-columns.tsx
@@ -0,0 +1,28 @@
+import { components } from "@/api/prefect";
+import { ColumnDef } from "@tanstack/react-table";
+
+type Flow = components["schemas"]["Flow"];
+type FlowMetadata = { attribute: string; value: string | string[] | null };
+
+export const columns: ColumnDef<FlowMetadata>[] = [
+ {
+ accessorKey: "attribute",
+ header: "Attribute",
+    cell: ({ row }) => (
+      <span>{row.original.attribute}</span>
+    ),
+ },
+ {
+ accessorKey: "value",
+ header: "Value",
+ cell: ({ row }) => row.original.value,
+ },
+];
+
+export const getFlowMetadata = (flow: Flow): FlowMetadata[] => [
+ { attribute: "ID", value: flow.id || null },
+ { attribute: "Name", value: flow.name },
+ { attribute: "Created", value: flow.created || null },
+ { attribute: "Updated", value: flow.updated || null },
+ { attribute: "Tags", value: flow.tags || [] },
+];
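+
+// Example: a flow named "etl" with no tags yields
+//   [{ attribute: "ID", value: "..." }, { attribute: "Name", value: "etl" },
+//    { attribute: "Created", value: "..." }, { attribute: "Updated", value: "..." },
+//    { attribute: "Tags", value: [] }]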
diff --git a/ui-v2/src/components/flows/detail/runs-columns.tsx b/ui-v2/src/components/flows/detail/runs-columns.tsx
new file mode 100644
index 000000000000..6f933ef5d4a0
--- /dev/null
+++ b/ui-v2/src/components/flows/detail/runs-columns.tsx
@@ -0,0 +1,81 @@
+import { components } from "@/api/prefect";
+import { ColumnDef } from "@tanstack/react-table";
+import { format, parseISO } from "date-fns";
+import { DeploymentCell, WorkPoolCell } from "./cells";
+
+type FlowRun = components["schemas"]["FlowRun"];
+
+export const columns: ColumnDef<FlowRun>[] = [
+ {
+ accessorKey: "created",
+ header: "Time",
+    cell: ({ row }) => (
+      <span>
+        {row.original.created &&
+          format(parseISO(row.original.created), "MMM dd HH:mm:ss OOOO")}
+      </span>
+    ),
+ },
+ {
+ accessorKey: "state",
+ header: "State",
+    cell: ({ row }) => (
+      <span>{row.original.state?.name}</span>
+    ),
+ },
+ {
+ accessorKey: "name",
+ header: "Name",
+ cell: ({ row }) => row.original.name,
+ },
+ {
+ accessorKey: "deployment",
+ header: "Deployment",
+    cell: ({ row }) => <DeploymentCell row={row} />,
+ },
+ {
+ accessorKey: "work_pool",
+ header: "Work Pool",
+    cell: ({ row }) => <WorkPoolCell row={row} />,
+ },
+ {
+ accessorKey: "work_queue",
+ header: "Work Queue",
+ cell: ({ row }) => row.original.work_queue_name,
+ },
+ {
+ accessorKey: "tags",
+ header: "Tags",
+    cell: ({ row }) =>
+      row.original.tags?.map((tag, index) => (
+        <span key={index}>
+          {tag}
+        </span>
+      )),
+ },
+ {
+ accessorKey: "duration",
+ header: "Duration",
+    cell: ({ row }) => (
+      <span>
+        {row.original.estimated_run_time
+          ? `${row.original.estimated_run_time}s`
+          : "-"}
+      </span>
+    ),
+ },
+];
diff --git a/ui-v2/src/components/flows/queries.tsx b/ui-v2/src/components/flows/queries.tsx
new file mode 100644
index 000000000000..0de7c4ac9078
--- /dev/null
+++ b/ui-v2/src/components/flows/queries.tsx
@@ -0,0 +1,329 @@
+import { components } from "@/api/prefect";
+import { getQueryService } from "@/api/service";
+import {
+ MutationFunction,
+ QueryFunction,
+ QueryKey,
+ QueryObserverOptions,
+} from "@tanstack/react-query";
+import { format } from "date-fns";
+
+export const flowQueryParams = (
+  flowId: string,
+  queryParams: Partial<QueryObserverOptions> = {},
+): {
+  queryKey: QueryKey;
+  queryFn: QueryFunction<components["schemas"]["Flow"]>;
+} => ({
+  ...queryParams,
+  queryKey: ["flows", flowId] as const,
+  queryFn: async (): Promise<components["schemas"]["Flow"]> => {
+ const response = await getQueryService()
+ .GET("/flows/{id}", {
+ params: { path: { id: flowId } },
+ })
+ .then((response) => response.data);
+ return response as components["schemas"]["Flow"];
+ },
+});
+
+export const flowRunsQueryParams = (
+  id: string,
+  body: components["schemas"]["Body_read_flow_runs_flow_runs_filter_post"],
+  queryParams: Partial<QueryObserverOptions> = {},
+): {
+  queryKey: readonly ["flowRun", string];
+  queryFn: () => Promise<components["schemas"]["FlowRunResponse"][]>;
+} => ({
+ ...queryParams,
+ queryKey: ["flowRun", JSON.stringify({ flowId: id, ...body })] as const,
+ queryFn: async () => {
+ const response = await getQueryService()
+ .POST("/flow_runs/filter", {
+ body: {
+ ...body,
+ flows: {
+ ...body.flows,
+ operator: "and_" as const,
+ id: { any_: [id] },
+ },
+ },
+ })
+ .then((response) => response.data);
+ return response as components["schemas"]["FlowRunResponse"][];
+ },
+});
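+
+// Example wiring (inside any component under the QueryClientProvider):
+//   const { data: runs } = useQuery(
+//     flowRunsQueryParams(flowId, { offset: 0, limit: 10, sort: "START_TIME_DESC" }),
+//   );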
+
+export const getLatestFlowRunsQueryParams = (
+  id: string,
+  n: number,
+  queryParams: Partial<QueryObserverOptions> = {},
+): {
+  queryKey: readonly ["flowRun", string];
+  queryFn: () => Promise<components["schemas"]["FlowRunResponse"][]>;
+} => ({
+ ...queryParams,
+ queryKey: [
+ "flowRun",
+ JSON.stringify({
+ flowId: id,
+ offset: 0,
+ limit: n,
+ sort: "START_TIME_DESC",
+ }),
+ ] as const,
+ queryFn: async () => {
+ const response = await getQueryService()
+ .POST("/flow_runs/filter", {
+ body: {
+ flows: { operator: "and_" as const, id: { any_: [id] } },
+ flow_runs: {
+ operator: "and_" as const,
+ start_time: {
+ before_: format(new Date(), "yyyy-MM-dd'T'HH:mm:ss'Z'"),
+ is_null_: false,
+ },
+ },
+ offset: 0,
+ limit: n,
+ sort: "START_TIME_DESC",
+ },
+ })
+ .then((response) => response.data);
+ return response as components["schemas"]["FlowRunResponse"][];
+ },
+});
+
+export const getNextFlowRunsQueryParams = (
+  id: string,
+  n: number,
+  queryParams: Partial<QueryObserverOptions> = {},
+): {
+  queryKey: readonly ["flowRun", string];
+  queryFn: () => Promise<components["schemas"]["FlowRunResponse"][]>;
+} => ({
+ ...queryParams,
+ queryKey: [
+ "flowRun",
+ JSON.stringify({
+ flowId: id,
+ offset: 0,
+ limit: n,
+ sort: "EXPECTED_START_TIME_ASC",
+ }),
+ ] as const,
+ queryFn: async () => {
+ const response = await getQueryService()
+ .POST("/flow_runs/filter", {
+ body: {
+ flows: { operator: "and_" as const, id: { any_: [id] } },
+ flow_runs: {
+ operator: "and_" as const,
+ expected_start_time: {
+ after_: format(new Date(), "yyyy-MM-dd'T'HH:mm:ss'Z'"),
+ },
+ },
+ offset: 0,
+ limit: n,
+ sort: "EXPECTED_START_TIME_ASC",
+ },
+ })
+ .then((response) => response.data);
+ return response as components["schemas"]["FlowRunResponse"][];
+ },
+});
+
+export const flowRunsCountQueryParams = (
+  id: string,
+  body?: components["schemas"]["Body_count_flow_runs_flow_runs_count_post"],
+  queryParams: Partial<QueryObserverOptions> = {},
+): {
+  queryKey: readonly ["flowRunCount", string];
+  queryFn: () => Promise<number>;
+} => ({
+ ...queryParams,
+ queryKey: ["flowRunCount", JSON.stringify({ flowId: id, ...body })] as const,
+ queryFn: async () => {
+ const response = await getQueryService()
+ .POST("/flow_runs/count", {
+ body: {
+ ...body,
+ flows: {
+ ...body?.flows,
+ operator: "and_" as const,
+ id: { any_: [id] },
+ },
+ flow_runs: {
+ operator: "and_" as const,
+ expected_start_time: {
+ before_: format(new Date(), "yyyy-MM-dd'T'HH:mm:ss'Z'"),
+ },
+ ...body?.flow_runs,
+ },
+ },
+ })
+ .then((response) => response.data);
+ return response as number;
+ },
+});
+
+export const deploymentsQueryParams = (
+  id: string,
+  body: components["schemas"]["Body_read_deployments_deployments_filter_post"],
+  queryParams: Partial<QueryObserverOptions> = {},
+): {
+  queryKey: readonly ["deployments", string];
+  queryFn: () => Promise<components["schemas"]["DeploymentResponse"][]>;
+} => ({
+ ...queryParams,
+ queryKey: ["deployments", JSON.stringify({ ...body, flowId: id })] as const,
+ queryFn: async () => {
+ const response = await getQueryService()
+ .POST("/deployments/filter", {
+ body: {
+ ...body,
+ flows: {
+ ...body?.flows,
+ operator: "and_" as const,
+ id: { any_: [id] },
+ },
+ },
+ })
+ .then((response) => response.data);
+ return response as components["schemas"]["DeploymentResponse"][];
+ },
+});
+
+export const deploymentsCountQueryParams = (
+  id: string,
+  queryParams: Partial<QueryObserverOptions> = {},
+): {
+  queryKey: readonly ["deploymentsCount", string];
+  queryFn: () => Promise<number>;
+} => ({
+ ...queryParams,
+ queryKey: ["deploymentsCount", JSON.stringify({ flowId: id })] as const,
+ queryFn: async () => {
+ const response = await getQueryService()
+ .POST("/deployments/count", {
+ body: { flows: { operator: "and_" as const, id: { any_: [id] } } },
+ })
+ .then((response) => response.data);
+ return response as number;
+ },
+});
+
+export const deleteFlowMutation = (
+  id: string,
+): {
+  mutationFn: MutationFunction<void>;
+} => ({
+ mutationFn: async () => {
+ await getQueryService().DELETE("/flows/{id}", {
+ params: { path: { id } },
+ });
+ },
+});
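+
+// Example: const { mutate: deleteFlow } = useMutation(deleteFlowMutation(id));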
+
+// Define the Flow class
+export class FlowQuery {
+ private flowId: string;
+
+ /**
+ * Initializes a new instance of the Flow class.
+ * @param flowId - The ID of the flow.
+ */
+ constructor(flowId: string) {
+ this.flowId = flowId;
+ }
+
+  public getQueryParams(queryParams: Partial<QueryObserverOptions> = {}): {
+    queryKey: QueryKey;
+    queryFn: QueryFunction<components["schemas"]["Flow"]>;
+  } {
+ return flowQueryParams(this.flowId, queryParams);
+ }
+
+  public getFlowRunsQueryParams(
+    body: components["schemas"]["Body_read_flow_runs_flow_runs_filter_post"],
+    queryParams: Partial<QueryObserverOptions> = {},
+  ): {
+    queryKey: readonly ["flowRun", string];
+    queryFn: () => Promise<components["schemas"]["FlowRunResponse"][]>;
+  } {
+ return flowRunsQueryParams(this.flowId, body, queryParams);
+ }
+
+  public getLatestFlowRunsQueryParams(
+    n: number,
+    queryParams: Partial<QueryObserverOptions> = {},
+  ): {
+    queryKey: readonly ["flowRun", string];
+    queryFn: () => Promise<components["schemas"]["FlowRunResponse"][]>;
+  } {
+ return flowRunsQueryParams(
+ this.flowId,
+ { offset: 0, limit: n, sort: "START_TIME_DESC" },
+ queryParams,
+ );
+ }
+
+  public getNextFlowRunsQueryParams(
+    n: number,
+    queryParams: Partial<QueryObserverOptions> = {},
+  ): {
+    queryKey: readonly ["flowRun", string];
+    queryFn: () => Promise<components["schemas"]["FlowRunResponse"][]>;
+  } {
+ return flowRunsQueryParams(
+ this.flowId,
+ {
+ offset: 0,
+ limit: n,
+ sort: "EXPECTED_START_TIME_ASC",
+ flow_runs: {
+ operator: "and_",
+ expected_start_time: {
+ after_: format(new Date(), "yyyy-MM-dd'T'HH:mm:ss'Z'"),
+ },
+ },
+ },
+ queryParams,
+ );
+ }
+
+  public getFlowRunsCountQueryParams(
+    body?: components["schemas"]["Body_count_flow_runs_flow_runs_count_post"],
+    queryParams: Partial<QueryObserverOptions> = {},
+  ): {
+    queryKey: readonly ["flowRunCount", string];
+    queryFn: () => Promise<number>;
+  } {
+ return flowRunsCountQueryParams(this.flowId, body, queryParams);
+ }
+
+  public getDeploymentsQueryParams(
+    body: components["schemas"]["Body_read_deployments_deployments_filter_post"],
+    queryParams: Partial<QueryObserverOptions> = {},
+  ): {
+    queryKey: readonly ["deployments", string];
+    queryFn: () => Promise<components["schemas"]["DeploymentResponse"][]>;
+  } {
+ return deploymentsQueryParams(this.flowId, body, queryParams);
+ }
+
+  public getDeploymentsCountQueryParams(
+    queryParams: Partial<QueryObserverOptions> = {},
+  ): {
+    queryKey: readonly ["deploymentsCount", string];
+    queryFn: () => Promise<number>;
+  } {
+ return deploymentsCountQueryParams(this.flowId, queryParams);
+ }
+
+  public getDeleteFlowMutation(): {
+    mutationFn: MutationFunction<void>;
+  } {
+ return deleteFlowMutation(this.flowId);
+ }
+}
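+
+// FlowQuery bundles the factories above behind one flow id, e.g.:
+//   const flowQuery = new FlowQuery(flowId);
+//   const { data: flow } = useQuery(flowQuery.getQueryParams());
+//   const { data: count } = useQuery(flowQuery.getDeploymentsCountQueryParams());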
diff --git a/ui-v2/src/components/layouts/MainLayout.tsx b/ui-v2/src/components/layouts/MainLayout.tsx
new file mode 100644
index 000000000000..6c6b89cebe14
--- /dev/null
+++ b/ui-v2/src/components/layouts/MainLayout.tsx
@@ -0,0 +1,13 @@
+import { SidebarProvider } from "@/components/ui/sidebar";
+import { AppSidebar } from "@/components/ui/app-sidebar";
+import { Toaster } from "../ui/toaster";
+
+export function MainLayout({ children }: { children: React.ReactNode }) {
+  return (
+    <SidebarProvider>
+      <AppSidebar />
+      {children}
+      <Toaster />
+    </SidebarProvider>
+  );
+}
diff --git a/ui-v2/src/components/ui/app-sidebar.tsx b/ui-v2/src/components/ui/app-sidebar.tsx
new file mode 100644
index 000000000000..804e23694d4e
--- /dev/null
+++ b/ui-v2/src/components/ui/app-sidebar.tsx
@@ -0,0 +1,185 @@
+import {
+ Sidebar,
+ SidebarContent,
+ SidebarFooter,
+ SidebarGroup,
+ SidebarHeader,
+ SidebarMenu,
+ SidebarMenuItem,
+ SidebarMenuButton,
+} from "@/components/ui/sidebar";
+import { Link } from "@tanstack/react-router";
+import { Button } from "@/components/ui/button";
+
+export function AppSidebar() {
+ return (
+
+
+
+ Prefect Logo
+
+
+
+
+
+
+
+
+
+
+ {({ isActive }) => (
+
+ Dashboard
+
+ )}
+
+
+
+
+ {({ isActive }) => (
+
+ Runs
+
+ )}
+
+
+
+
+ {({ isActive }) => (
+
+ Flows
+
+ )}
+
+
+
+
+ {({ isActive }) => (
+
+ Deployments
+
+ )}
+
+
+
+
+ {({ isActive }) => (
+
+ Work Pools
+
+ )}
+
+
+
+
+ {({ isActive }) => (
+
+ Blocks
+
+ )}
+
+
+
+
+ {({ isActive }) => (
+
+ Variables
+
+ )}
+
+
+
+
+ {({ isActive }) => (
+
+ Automations
+
+ )}
+
+
+
+
+ {({ isActive }) => (
+
+ Event Feed
+
+ )}
+
+
+
+
+ {({ isActive }) => (
+
+ Notifications
+
+ )}
+
+
+
+
+ {({ isActive }) => (
+
+ Concurrency
+
+ )}
+
+
+
+
+
+
+
+
+
+
+
+ Ready to scale?
+ Upgrade
+
+
+
+
+
+
+ Join the community
+
+
+
+
+ {({ isActive }) => (
+
+ Settings
+
+ )}
+
+
+
+
+
+ );
+}
diff --git a/ui-v2/src/components/ui/badge/badge.tsx b/ui-v2/src/components/ui/badge/badge.tsx
new file mode 100644
index 000000000000..e059e673ff1c
--- /dev/null
+++ b/ui-v2/src/components/ui/badge/badge.tsx
@@ -0,0 +1,14 @@
+import type { VariantProps } from "class-variance-authority";
+
+import { cn } from "@/lib/utils";
+import { badgeVariants } from "./styles";
+
+export interface BadgeProps
+  extends React.HTMLAttributes<HTMLDivElement>,
+    VariantProps<typeof badgeVariants> {}
+
+export function Badge({ className, variant, ...props }: BadgeProps) {
+  return (
+    <div className={cn(badgeVariants({ variant }), className)} {...props} />
+  );
+}
diff --git a/ui-v2/src/components/ui/badge/index.ts b/ui-v2/src/components/ui/badge/index.ts
new file mode 100644
index 000000000000..5c823ae92e4a
--- /dev/null
+++ b/ui-v2/src/components/ui/badge/index.ts
@@ -0,0 +1,2 @@
+export * from "./badge";
+export * from "./styles";
diff --git a/ui-v2/src/components/ui/badge/styles.ts b/ui-v2/src/components/ui/badge/styles.ts
new file mode 100644
index 000000000000..efaedefbb2f0
--- /dev/null
+++ b/ui-v2/src/components/ui/badge/styles.ts
@@ -0,0 +1,21 @@
+import { cva } from "class-variance-authority";
+
+export const badgeVariants = cva(
+ "inline-flex items-center rounded-md border px-2.5 py-0.5 text-xs font-semibold transition-colors focus:outline-none focus:ring-2 focus:ring-ring focus:ring-offset-2",
+ {
+ variants: {
+ variant: {
+ default:
+ "border-transparent bg-primary text-primary-foreground shadow hover:bg-primary/80",
+ secondary:
+ "border-transparent bg-secondary text-secondary-foreground hover:bg-secondary/80",
+ destructive:
+ "border-transparent bg-destructive text-destructive-foreground shadow hover:bg-destructive/80",
+ outline: "text-foreground",
+ },
+ },
+ defaultVariants: {
+ variant: "default",
+ },
+ },
+);
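+
+// badgeVariants can style arbitrary elements too, e.g.:
+//   <a href="#" className={badgeVariants({ variant: "outline" })}>docs</a>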
diff --git a/ui-v2/src/components/ui/breadcrumb.tsx b/ui-v2/src/components/ui/breadcrumb.tsx
new file mode 100644
index 000000000000..033e3c54517b
--- /dev/null
+++ b/ui-v2/src/components/ui/breadcrumb.tsx
@@ -0,0 +1,134 @@
+import { ChevronRightIcon, DotsHorizontalIcon } from "@radix-ui/react-icons";
+import { Slot } from "@radix-ui/react-slot";
+import * as React from "react";
+
+import { cn } from "@/lib/utils";
+
+const Breadcrumb = React.forwardRef<
+ HTMLElement,
+ React.ComponentPropsWithoutRef<"nav"> & {
+ separator?: React.ReactNode;
+ }
+>(({ ...props }, ref) => <nav ref={ref} aria-label="breadcrumb" {...props} />);
+Breadcrumb.displayName = "Breadcrumb";
+
+type BreadcrumbListProps = React.ComponentPropsWithoutRef<"ol"> & {
+ className?: string;
+};
+
+const BreadcrumbList = React.forwardRef<HTMLOListElement, BreadcrumbListProps>(
+  ({ className, ...props }, ref) => (
+    <ol
+      ref={ref}
+      className={cn(
+        "flex flex-wrap items-center gap-1.5 break-words text-sm text-muted-foreground sm:gap-2.5",
+        className,
+      )}
+      {...props}
+    />
+  ),
+);
+BreadcrumbList.displayName = "BreadcrumbList";
+
+type BreadcrumbItemProps = React.ComponentPropsWithoutRef<"li"> & {
+ className?: string;
+};
+
+const BreadcrumbItem = React.forwardRef<HTMLLIElement, BreadcrumbItemProps>(
+  ({ className, ...props }, ref) => (
+    <li
+      ref={ref}
+      className={cn("inline-flex items-center gap-1.5", className)}
+      {...props}
+    />
+  ),
+);
+BreadcrumbItem.displayName = "BreadcrumbItem";
+
+type BreadcrumbLinkProps = React.ComponentPropsWithoutRef<"a"> & {
+ asChild?: boolean;
+ className?: string;
+};
+
+const BreadcrumbLink = React.forwardRef<HTMLAnchorElement, BreadcrumbLinkProps>(
+  ({ asChild, className, ...props }, ref) => {
+    const Comp = asChild ? Slot : "a";
+
+    return (
+      <Comp
+        ref={ref}
+        className={cn("transition-colors hover:text-foreground", className)}
+        {...props}
+      />
+    );
+  },
+);
+BreadcrumbLink.displayName = "BreadcrumbLink";
+
+type BreadcrumbPageProps = React.ComponentPropsWithoutRef<"span"> & {
+ className?: string;
+};
+
+const BreadcrumbPage = React.forwardRef<HTMLSpanElement, BreadcrumbPageProps>(
+  ({ className, ...props }, ref) => (
+    <span
+      ref={ref}
+      role="link"
+      aria-disabled="true"
+      aria-current="page"
+      className={cn("font-normal text-foreground", className)}
+      {...props}
+    />
+  ),
+);
+BreadcrumbPage.displayName = "BreadcrumbPage";
+
+type BreadcrumbSeparatorProps = React.ComponentPropsWithoutRef<"li"> & {
+ className?: string;
+};
+
+const BreadcrumbSeparator = ({
+  children,
+  className,
+  ...props
+}: BreadcrumbSeparatorProps) => (
+  <li
+    role="presentation"
+    aria-hidden="true"
+    className={cn("[&>svg]:size-3.5", className)}
+    {...props}
+  >
+    {children ?? <ChevronRightIcon />}
+  </li>
+);
+BreadcrumbSeparator.displayName = "BreadcrumbSeparator";
+
+type BreadcrumbEllipsisProps = React.ComponentPropsWithoutRef<"span"> & {
+ className?: string;
+};
+
+const BreadcrumbEllipsis = ({
+  className,
+  ...props
+}: BreadcrumbEllipsisProps) => (
+  <span
+    role="presentation"
+    aria-hidden="true"
+    className={cn("flex h-9 w-9 items-center justify-center", className)}
+    {...props}
+  >
+    <DotsHorizontalIcon className="h-4 w-4" />
+    <span className="sr-only">More</span>
+  </span>
+);
+BreadcrumbEllipsis.displayName = "BreadcrumbEllipsis";
+
+export {
+ Breadcrumb,
+ BreadcrumbList,
+ BreadcrumbItem,
+ BreadcrumbLink,
+ BreadcrumbPage,
+ BreadcrumbSeparator,
+ BreadcrumbEllipsis,
+};
diff --git a/ui-v2/src/components/ui/button/button.tsx b/ui-v2/src/components/ui/button/button.tsx
new file mode 100644
index 000000000000..b7842f2b9d4e
--- /dev/null
+++ b/ui-v2/src/components/ui/button/button.tsx
@@ -0,0 +1,28 @@
+import { Slot } from "@radix-ui/react-slot";
+import type { VariantProps } from "class-variance-authority";
+import * as React from "react";
+
+import { cn } from "@/lib/utils";
+import { buttonVariants } from "./styles";
+
+export interface ButtonProps
+  extends React.ButtonHTMLAttributes<HTMLButtonElement>,
+    VariantProps<typeof buttonVariants> {
+  asChild?: boolean;
+}
+
+const Button = React.forwardRef<HTMLButtonElement, ButtonProps>(
+  ({ className, variant, size, asChild = false, ...props }, ref) => {
+    const Comp = asChild ? Slot : "button";
+    return (
+      <Comp
+        className={cn(buttonVariants({ variant, size, className }))}
+        ref={ref}
+        {...props}
+      />
+    );
+  },
+);
+Button.displayName = "Button";
+
+export { Button };
diff --git a/ui-v2/src/components/ui/button/index.ts b/ui-v2/src/components/ui/button/index.ts
new file mode 100644
index 000000000000..cfd75ca081e3
--- /dev/null
+++ b/ui-v2/src/components/ui/button/index.ts
@@ -0,0 +1,2 @@
+export * from "./button";
+export * from "./styles";
diff --git a/ui-v2/src/components/ui/button/styles.ts b/ui-v2/src/components/ui/button/styles.ts
new file mode 100644
index 000000000000..b7cd7db9e761
--- /dev/null
+++ b/ui-v2/src/components/ui/button/styles.ts
@@ -0,0 +1,31 @@
+import { cva } from "class-variance-authority";
+
+export const buttonVariants = cva(
+ "inline-flex items-center justify-center whitespace-nowrap rounded-md text-sm font-medium transition-colors focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-ring disabled:pointer-events-none disabled:opacity-50",
+ {
+ variants: {
+ variant: {
+ default:
+ "bg-primary text-primary-foreground shadow hover:bg-primary/90",
+ destructive:
+ "bg-destructive text-destructive-foreground shadow-sm hover:bg-destructive/90",
+ outline:
+ "border border-input bg-background shadow-sm hover:bg-accent hover:text-accent-foreground",
+ secondary:
+ "bg-secondary text-secondary-foreground shadow-sm hover:bg-secondary/80",
+ ghost: "hover:bg-accent hover:text-accent-foreground",
+ link: "text-primary underline-offset-4 hover:underline",
+ },
+ size: {
+ default: "h-9 px-4 py-2",
+ sm: "h-8 rounded-md px-3 text-xs",
+ lg: "h-10 rounded-md px-8",
+ icon: "h-9 w-9",
+ },
+ },
+ defaultVariants: {
+ variant: "default",
+ size: "default",
+ },
+ },
+);
diff --git a/ui-v2/src/components/ui/calendar.tsx b/ui-v2/src/components/ui/calendar.tsx
new file mode 100644
index 000000000000..9fb2c4e8ed95
--- /dev/null
+++ b/ui-v2/src/components/ui/calendar.tsx
@@ -0,0 +1,75 @@
+import { ChevronLeftIcon, ChevronRightIcon } from "@radix-ui/react-icons";
+import * as React from "react";
+import { DayPicker } from "react-day-picker";
+
+import { buttonVariants } from "@/components/ui/button";
+import { cn } from "@/lib/utils";
+
+type CalendarProps = React.ComponentProps<typeof DayPicker> & {
+  className?: string;
+  classNames?: Record<string, string>;
+ showOutsideDays?: boolean;
+ mode?: "default" | "single" | "multiple" | "range" | undefined;
+};
+
+function Calendar({
+ className,
+ classNames,
+ showOutsideDays = true,
+ ...props
+}: CalendarProps) {
+  return (
+    <DayPicker
+      showOutsideDays={showOutsideDays}
+      className={cn("p-3", className)}
+      classNames={{
+        months: "flex flex-col sm:flex-row space-y-4 sm:space-x-4 sm:space-y-0",
+        month: "space-y-4",
+        caption: "flex justify-center pt-1 relative items-center",
+        caption_label: "text-sm font-medium",
+        nav: "space-x-1 flex items-center",
+        nav_button: cn(
+          buttonVariants({ variant: "outline" }),
+          "h-7 w-7 bg-transparent p-0 opacity-50 hover:opacity-100",
+        ),
+        nav_button_previous: "absolute left-1",
+        nav_button_next: "absolute right-1",
+        table: "w-full border-collapse space-y-1",
+        head_row: "flex",
+        head_cell:
+          "text-muted-foreground rounded-md w-8 font-normal text-[0.8rem]",
+        row: "flex w-full mt-2",
+        cell: cn(
+          "relative p-0 text-center text-sm focus-within:relative focus-within:z-20 [&:has([aria-selected])]:bg-accent [&:has([aria-selected].day-outside)]:bg-accent/50 [&:has([aria-selected].day-range-end)]:rounded-r-md",
+          props.mode === "range"
+            ? "[&:has(>.day-range-end)]:rounded-r-md [&:has(>.day-range-start)]:rounded-l-md first:[&:has([aria-selected])]:rounded-l-md last:[&:has([aria-selected])]:rounded-r-md"
+            : "[&:has([aria-selected])]:rounded-md",
+        ),
+ day: cn(
+ buttonVariants({ variant: "ghost" }),
+ "h-8 w-8 p-0 font-normal aria-selected:opacity-100",
+ ),
+ day_range_start: "day-range-start",
+ day_range_end: "day-range-end",
+ day_selected:
+ "bg-primary text-primary-foreground hover:bg-primary hover:text-primary-foreground focus:bg-primary focus:text-primary-foreground",
+ day_today: "bg-accent text-accent-foreground",
+ day_outside:
+ "day-outside text-muted-foreground opacity-50 aria-selected:bg-accent/50 aria-selected:text-muted-foreground aria-selected:opacity-30",
+ day_disabled: "text-muted-foreground opacity-50",
+ day_range_middle:
+ "aria-selected:bg-accent aria-selected:text-accent-foreground",
+ day_hidden: "invisible",
+ ...classNames,
+ }}
+ components={{
+        IconLeft: () => <ChevronLeftIcon className="h-4 w-4" />,
+        IconRight: () => <ChevronRightIcon className="h-4 w-4" />,
+ }}
+ {...props}
+ />
+ );
+}
+Calendar.displayName = "Calendar";
+
+export { Calendar };
diff --git a/ui-v2/src/components/ui/card.tsx b/ui-v2/src/components/ui/card.tsx
new file mode 100644
index 000000000000..78cd0c348b24
--- /dev/null
+++ b/ui-v2/src/components/ui/card.tsx
@@ -0,0 +1,83 @@
+import * as React from "react";
+
+import { cn } from "@/lib/utils";
+
+const Card = React.forwardRef<
+  HTMLDivElement,
+  React.HTMLAttributes<HTMLDivElement>
+>(({ className, ...props }, ref) => (
+  <div
+    ref={ref}
+    className={cn(
+      "rounded-xl border bg-card text-card-foreground shadow",
+      className,
+    )}
+    {...props}
+  />
+));
+Card.displayName = "Card";
+
+const CardHeader = React.forwardRef<
+  HTMLDivElement,
+  React.HTMLAttributes<HTMLDivElement>
+>(({ className, ...props }, ref) => (
+  <div
+    ref={ref}
+    className={cn("flex flex-col space-y-1.5 p-6", className)}
+    {...props}
+  />
+));
+CardHeader.displayName = "CardHeader";
+
+const CardTitle = React.forwardRef<
+  HTMLParagraphElement,
+  React.HTMLAttributes<HTMLHeadingElement>
+>(({ className, ...props }, ref) => (
+  <h3
+    ref={ref}
+    className={cn("font-semibold leading-none tracking-tight", className)}
+    {...props}
+  />
+));
+CardTitle.displayName = "CardTitle";
+
+const CardDescription = React.forwardRef<
+  HTMLParagraphElement,
+  React.HTMLAttributes<HTMLParagraphElement>
+>(({ className, ...props }, ref) => (
+  <p
+    ref={ref}
+    className={cn("text-sm text-muted-foreground", className)}
+    {...props}
+  />
+));
+CardDescription.displayName = "CardDescription";
+
+const CardContent = React.forwardRef<
+  HTMLDivElement,
+  React.HTMLAttributes<HTMLDivElement>
+>(({ className, ...props }, ref) => (
+  <div ref={ref} className={cn("p-6 pt-0", className)} {...props} />
+));
+CardContent.displayName = "CardContent";
+
+const CardFooter = React.forwardRef<
+  HTMLDivElement,
+  React.HTMLAttributes<HTMLDivElement>
+>(({ className, ...props }, ref) => (
+  <div
+    ref={ref}
+    className={cn("flex items-center p-6 pt-0", className)}
+    {...props}
+  />
+));
+CardFooter.displayName = "CardFooter";
+
+export {
+ Card,
+ CardHeader,
+ CardFooter,
+ CardTitle,
+ CardDescription,
+ CardContent,
+};
diff --git a/ui-v2/src/components/ui/chart.tsx b/ui-v2/src/components/ui/chart.tsx
new file mode 100644
index 000000000000..fc33fc2fa252
--- /dev/null
+++ b/ui-v2/src/components/ui/chart.tsx
@@ -0,0 +1,369 @@
+/* eslint-disable */
+// This file was generated by shadcn-ui, but raises a lot of eslint errors.
+// TODO: Fix eslint errors.
+import * as React from "react";
+import * as RechartsPrimitive from "recharts";
+
+import { cn } from "@/lib/utils";
+
+// Format: { THEME_NAME: CSS_SELECTOR }
+const THEMES = { light: "", dark: ".dark" } as const;
+
+export type ChartConfig = {
+ [k in string]: {
+ label?: React.ReactNode;
+ icon?: React.ComponentType;
+ } & (
+ | { color?: string; theme?: never }
+    | { color?: never; theme: Record<keyof typeof THEMES, string> }
+ );
+};
+
+type ChartContextProps = {
+ config: ChartConfig;
+};
+
+const ChartContext = React.createContext<ChartContextProps | null>(null);
+
+function useChart() {
+ const context = React.useContext(ChartContext);
+
+ if (!context) {
+ throw new Error("useChart must be used within a ");
+ }
+
+ return context;
+}
+
+type ChartContainerProps = React.ComponentProps<"div"> & {
+ config: ChartConfig;
+ className?: string;
+ id?: string;
+ children: React.ComponentProps<
+ typeof RechartsPrimitive.ResponsiveContainer
+ >["children"];
+};
+
+const ChartContainer = React.forwardRef<HTMLDivElement, ChartContainerProps>(
+ ({ id, className, children, config, ...props }, ref) => {
+ const uniqueId = React.useId();
+ const chartId = `chart-${id || uniqueId.replace(/:/g, "")}`;
+
+    return (
+      <ChartContext.Provider value={{ config }}>
+        <div
+          data-chart={chartId}
+          ref={ref}
+          className={cn(
+            "flex aspect-video justify-center text-xs [&_.recharts-cartesian-axis-tick_text]:fill-muted-foreground [&_.recharts-layer]:outline-none [&_.recharts-surface]:outline-none",
+            className,
+          )}
+          {...props}
+        >
+          <ChartStyle id={chartId} config={config} />
+          <RechartsPrimitive.ResponsiveContainer>
+            {children}
+          </RechartsPrimitive.ResponsiveContainer>
+        </div>
+      </ChartContext.Provider>
+    );
+ },
+);
+ChartContainer.displayName = "Chart";
+
+const ChartStyle = ({ id, config }: { id: string; config: ChartConfig }) => {
+ const colorConfig = Object.entries(config).filter(
+ ([, config]) => config.theme || config.color,
+ );
+
+ if (!colorConfig.length) {
+ return null;
+ }
+
+ return (
+