From af55dbf713a93dc6b18972646c425be8219b97d9 Mon Sep 17 00:00:00 2001
From: Paul Gottschling <paul.gottschling@goteleport.com>
Date: Tue, 29 Oct 2024 10:32:01 -0400
Subject: [PATCH 01/13] Update Terraform reference category page locations
 (#47955)

As with #47797, Docusaurus expects category index pages to be
within--and named after--their parent directories. (Docusaurus also
accepts the names `README` and `index` for category pages, but we are
not using this convention.) This change applies this standard to our
Terraform provider reference docs.

- Update paths in the generator configuration. Assume that each category
  index page is in the same subdirectory as its contents.
- Update paths in the `lint.yaml` workflow.
- Fix internal links.
---
 .github/workflows/lint.yaml                   |  1 -
 .../import-existing-resources.mdx             |  5 ++-
 .../terraform-provider/dedicated-server.mdx   |  2 +-
 .../terraform-provider/local.mdx              |  8 ++--
 .../long-lived-credentials.mdx                |  9 +++--
 .../terraform-provider/spacelift.mdx          |  4 +-
 .../terraform-provider/terraform-provider.mdx |  4 +-
 .../terraform-starter/enroll-resources.mdx    |  6 +--
 .../terraform-starter/rbac.mdx                |  2 +-
 .../terraform-provider/data-sources.mdx       | 35 ------------------
 .../data-sources/data-sources.mdx             | 35 ++++++++++++++++++
 .../terraform-provider/resources.mdx          | 37 -------------------
 .../resources/resources.mdx                   | 37 +++++++++++++++++++
 .../terraform-provider.mdx                    | 28 +++++++-------
 integrations/terraform/gen/docs.sh            | 10 ++---
 integrations/terraform/provider/provider.go   |  4 +-
 .../templates/data-sources-index.mdx.tmpl     |  2 +-
 .../terraform/templates/index.md.tmpl         | 24 ++++++------
 .../templates/resources-index.mdx.tmpl        |  2 +-
 19 files changed, 130 insertions(+), 125 deletions(-)
 delete mode 100644 docs/pages/reference/terraform-provider/data-sources.mdx
 create mode 100644 docs/pages/reference/terraform-provider/data-sources/data-sources.mdx
 delete mode 100644 docs/pages/reference/terraform-provider/resources.mdx
 create mode 100644 docs/pages/reference/terraform-provider/resources/resources.mdx
 rename docs/pages/reference/{ => terraform-provider}/terraform-provider.mdx (84%)

diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml
index 2c31d0ef7ec92..860aeefeaecf9 100644
--- a/.github/workflows/lint.yaml
+++ b/.github/workflows/lint.yaml
@@ -63,7 +63,6 @@ jobs:
               - 'docs/pages/admin-guides/**'
               - 'docs/pages/enroll-resources/**'
               - 'docs/pages/reference/operator-resources/**'
-              - 'docs/pages/reference/terraform-provider.mdx'
               - 'docs/pages/reference/terraform-provider/**'
               - 'examples/chart/teleport-cluster/charts/teleport-operator/operator-crds'
 
diff --git a/docs/pages/admin-guides/infrastructure-as-code/managing-resources/import-existing-resources.mdx b/docs/pages/admin-guides/infrastructure-as-code/managing-resources/import-existing-resources.mdx
index d5329defb9feb..4e3a186d6b0c4 100644
--- a/docs/pages/admin-guides/infrastructure-as-code/managing-resources/import-existing-resources.mdx
+++ b/docs/pages/admin-guides/infrastructure-as-code/managing-resources/import-existing-resources.mdx
@@ -83,5 +83,6 @@ cluster configuration matches your expectations.
 - Follow [the user and role IaC guide](user-and-role.mdx) to use the Terraform
   Provider to create Teleport users and grant them roles.
 - Explore the full list of supported [Terraform provider
-  resources](../../../reference/terraform-provider.mdx).
-- See [the list of supported Teleport Terraform setups](../terraform-provider/terraform-provider.mdx):
+  resources](../../../reference/terraform-provider/terraform-provider.mdx).
+- See [the list of supported Teleport Terraform
+  setups](../terraform-provider/terraform-provider.mdx):
diff --git a/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/dedicated-server.mdx b/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/dedicated-server.mdx
index 3afb79ea87035..683f4f3082bf8 100644
--- a/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/dedicated-server.mdx
+++ b/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/dedicated-server.mdx
@@ -159,7 +159,7 @@ $ tctl get role/terraform-test
 ## Next steps
 
 - Explore the
-  [Terraform provider resource reference](../../../reference/terraform-provider.mdx)
+  [Terraform provider resource reference](../../../reference/terraform-provider/terraform-provider.mdx)
   to discover what can be configured with the Teleport Terraform provider.
 - Read the [tbot configuration reference](../../../reference/machine-id/configuration.mdx) to explore
   all the available `tbot` configuration options.
diff --git a/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/local.mdx b/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/local.mdx
index 59c49dd2e831f..d6912c912c32e 100644
--- a/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/local.mdx
+++ b/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/local.mdx
@@ -159,8 +159,10 @@ Do not forget to obtain new temporary credentials every hour by re-running `eval
 - Follow [the user and role IaC guide](../managing-resources/user-and-role.mdx) to use the Terraform
   Provider to create Teleport users and grant them roles.
 - Consult the list of Terraform-supported
-  resources [in the Terraform reference](../../../reference/terraform-provider.mdx).
-- Once you have working Terraform code that configures your Teleport cluster, you might want to run it in the CI or
-  from a bastion instead of running it locally. To do this, please follow the dedicated guides:
+  resources [in the Terraform
+  reference](../../../reference/terraform-provider/terraform-provider.mdx).
+- Once you have working Terraform code that configures your Teleport cluster,
+  you might want to run it in the CI or from a bastion instead of running it
+  locally. To do this, please follow the dedicated guides:
   - [Run the Terraform Provider in CI or cloud VMs](./ci-or-cloud.mdx)
   - [Run the Terraform Provider on a dedicated server](./dedicated-server.mdx)
diff --git a/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/long-lived-credentials.mdx b/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/long-lived-credentials.mdx
index ee1e9e3ed3cc5..9e8d15e9d372b 100644
--- a/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/long-lived-credentials.mdx
+++ b/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/long-lived-credentials.mdx
@@ -199,6 +199,9 @@ To apply the configuration:
 
 ## Next steps
 
-- Explore the full list of supported [Terraform provider resources](../../../reference/terraform-provider.mdx).
-- Learn [how to manage users and roles with IaC](../managing-resources/user-and-role.mdx)
-- Read more about [impersonation](../../access-controls/guides/impersonation.mdx).
+- Explore the full list of supported [Terraform provider
+  resources](../../../reference/terraform-provider/terraform-provider.mdx).
+- Learn [how to manage users and roles with
+  IaC](../managing-resources/user-and-role.mdx)
+- Read more about
+  [impersonation](../../access-controls/guides/impersonation.mdx).
diff --git a/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/spacelift.mdx b/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/spacelift.mdx
index 976b95f8b2306..249a954eee4a3 100644
--- a/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/spacelift.mdx
+++ b/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/spacelift.mdx
@@ -260,8 +260,8 @@ $ tctl get users/terraform-test
 
 - Now that you know how to manage Teleport configuration resources with
   Terraform and Spacelift, read the [Terraform resource
-  reference](../../../reference/terraform-provider.mdx) so you can flesh out your
-  configuration.
+  reference](../../../reference/terraform-provider/terraform-provider.mdx) so
+  you can flesh out your configuration.
 - To find out more about Spacelift's OIDC implementation, which Machine ID uses
   to authenticate to your Teleport cluster, read [the Spacelift
   documentation](https://docs.spacelift.io/integrations/cloud-providers/oidc/).
diff --git a/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/terraform-provider.mdx b/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/terraform-provider.mdx
index e19b9a49b0fc9..5a7a41505ae19 100644
--- a/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/terraform-provider.mdx
+++ b/docs/pages/admin-guides/infrastructure-as-code/terraform-provider/terraform-provider.mdx
@@ -28,8 +28,8 @@ is executed. You must pick the correct guide for your setup:
 
 Once you have a functional Teleport Terraform provider, you will want to configure your resources with it.
 
-You can find the list of supported resources and their fields is
-available [in the Terraform reference](../../../reference/terraform-provider.mdx).
+The list of supported resources and their fields is available [in the Terraform
+reference](../../../reference/terraform-provider/terraform-provider.mdx).
 
 Some resources have their dedicated Infrastructure-as-Code (IaC) step-by step guides such as:
 - [Managing Users And Roles With IaC](../managing-resources/user-and-role.mdx)
diff --git a/docs/pages/admin-guides/infrastructure-as-code/terraform-starter/enroll-resources.mdx b/docs/pages/admin-guides/infrastructure-as-code/terraform-starter/enroll-resources.mdx
index d4de6522c848a..5a2f34e326db2 100644
--- a/docs/pages/admin-guides/infrastructure-as-code/terraform-starter/enroll-resources.mdx
+++ b/docs/pages/admin-guides/infrastructure-as-code/terraform-starter/enroll-resources.mdx
@@ -623,7 +623,7 @@ edit your Terraform module to:
 1. **Change the userdata script** to enable additional Agent services additional
    infrastructure resources for your Agents to proxy.
 1. **Deploy dynamic resources:** Consult the [Terraform provider
-   reference](../../../reference/terraform-provider.mdx) for Terraform resources
-   that you can apply in order to enroll dynamic resources in your
-   infrastructure.
+   reference](../../../reference/terraform-provider/terraform-provider.mdx) for
+   Terraform resources that you can apply in order to enroll dynamic resources
+   in your infrastructure.
 
diff --git a/docs/pages/admin-guides/infrastructure-as-code/terraform-starter/rbac.mdx b/docs/pages/admin-guides/infrastructure-as-code/terraform-starter/rbac.mdx
index 3699876228ef1..865192382bc8a 100644
--- a/docs/pages/admin-guides/infrastructure-as-code/terraform-starter/rbac.mdx
+++ b/docs/pages/admin-guides/infrastructure-as-code/terraform-starter/rbac.mdx
@@ -563,4 +563,4 @@ troubleshoot the single sign-on provider.
 
 Now that you have configured RBAC in your Terraform demo cluster, fine-tune your
 setup by reading the comprehensive [Terraform provider
-reference](../../../reference/terraform-provider.mdx).
+reference](../../../reference/terraform-provider/terraform-provider.mdx).
diff --git a/docs/pages/reference/terraform-provider/data-sources.mdx b/docs/pages/reference/terraform-provider/data-sources.mdx
deleted file mode 100644
index 6c7f82c16279a..0000000000000
--- a/docs/pages/reference/terraform-provider/data-sources.mdx
+++ /dev/null
@@ -1,35 +0,0 @@
----
-title: "Terraform data-sources index"
-description: "Index of all the data-sources supported by the Teleport Terraform Provider"
----
-
-{/*Auto-generated file. Do not edit.*/}
-{/*To regenerate, navigate to integrations/terraform and run `make docs`.*/}
-
-{/*
-    This file will be renamed data-sources.mdx during build time.
-    The template name is reserved by tfplugindocs so we suffix with -index.
-*/}
-
-The Teleport Terraform provider supports the following data-sources:
-
-  - [`teleport_access_list`](./data-sources/access_list.mdx)
-  - [`teleport_access_monitoring_rule`](./data-sources/access_monitoring_rule.mdx)
-  - [`teleport_app`](./data-sources/app.mdx)
-  - [`teleport_auth_preference`](./data-sources/auth_preference.mdx)
-  - [`teleport_cluster_maintenance_config`](./data-sources/cluster_maintenance_config.mdx)
-  - [`teleport_cluster_networking_config`](./data-sources/cluster_networking_config.mdx)
-  - [`teleport_database`](./data-sources/database.mdx)
-  - [`teleport_github_connector`](./data-sources/github_connector.mdx)
-  - [`teleport_installer`](./data-sources/installer.mdx)
-  - [`teleport_login_rule`](./data-sources/login_rule.mdx)
-  - [`teleport_oidc_connector`](./data-sources/oidc_connector.mdx)
-  - [`teleport_okta_import_rule`](./data-sources/okta_import_rule.mdx)
-  - [`teleport_provision_token`](./data-sources/provision_token.mdx)
-  - [`teleport_role`](./data-sources/role.mdx)
-  - [`teleport_saml_connector`](./data-sources/saml_connector.mdx)
-  - [`teleport_session_recording_config`](./data-sources/session_recording_config.mdx)
-  - [`teleport_static_host_user`](./data-sources/static_host_user.mdx)
-  - [`teleport_trusted_cluster`](./data-sources/trusted_cluster.mdx)
-  - [`teleport_trusted_device`](./data-sources/trusted_device.mdx)
-  - [`teleport_user`](./data-sources/user.mdx)
diff --git a/docs/pages/reference/terraform-provider/data-sources/data-sources.mdx b/docs/pages/reference/terraform-provider/data-sources/data-sources.mdx
new file mode 100644
index 0000000000000..047a8a04a630b
--- /dev/null
+++ b/docs/pages/reference/terraform-provider/data-sources/data-sources.mdx
@@ -0,0 +1,35 @@
+---
+title: "Terraform data-sources index"
+description: "Index of all the data-sources supported by the Teleport Terraform Provider"
+---
+
+{/*Auto-generated file. Do not edit.*/}
+{/*To regenerate, navigate to integrations/terraform and run `make docs`.*/}
+
+{/*
+    This file will be renamed data-sources.mdx during build time.
+    The template name is reserved by tfplugindocs so we suffix with -index.
+*/}
+
+The Teleport Terraform provider supports the following data-sources:
+
+  - [`teleport_access_list`](./access_list.mdx)
+  - [`teleport_access_monitoring_rule`](./access_monitoring_rule.mdx)
+  - [`teleport_app`](./app.mdx)
+  - [`teleport_auth_preference`](./auth_preference.mdx)
+  - [`teleport_cluster_maintenance_config`](./cluster_maintenance_config.mdx)
+  - [`teleport_cluster_networking_config`](./cluster_networking_config.mdx)
+  - [`teleport_database`](./database.mdx)
+  - [`teleport_github_connector`](./github_connector.mdx)
+  - [`teleport_installer`](./installer.mdx)
+  - [`teleport_login_rule`](./login_rule.mdx)
+  - [`teleport_oidc_connector`](./oidc_connector.mdx)
+  - [`teleport_okta_import_rule`](./okta_import_rule.mdx)
+  - [`teleport_provision_token`](./provision_token.mdx)
+  - [`teleport_role`](./role.mdx)
+  - [`teleport_saml_connector`](./saml_connector.mdx)
+  - [`teleport_session_recording_config`](./session_recording_config.mdx)
+  - [`teleport_static_host_user`](./static_host_user.mdx)
+  - [`teleport_trusted_cluster`](./trusted_cluster.mdx)
+  - [`teleport_trusted_device`](./trusted_device.mdx)
+  - [`teleport_user`](./user.mdx)
diff --git a/docs/pages/reference/terraform-provider/resources.mdx b/docs/pages/reference/terraform-provider/resources.mdx
deleted file mode 100644
index dd2640e926d22..0000000000000
--- a/docs/pages/reference/terraform-provider/resources.mdx
+++ /dev/null
@@ -1,37 +0,0 @@
----
-title: "Terraform resources index"
-description: "Index of all the datasources supported by the Teleport Terraform Provider"
----
-
-{/*Auto-generated file. Do not edit.*/}
-{/*To regenerate, navigate to integrations/terraform and run `make docs`.*/}
-
-{/*
-    This file will be renamed data-sources.mdx during build time.
-    The template name is reserved by tfplugindocs so we suffix with -index.
-*/}
-
-The Teleport Terraform provider supports the following resources:
-
-  - [`teleport_access_list`](./resources/access_list.mdx)
-  - [`teleport_access_monitoring_rule`](./resources/access_monitoring_rule.mdx)
-  - [`teleport_app`](./resources/app.mdx)
-  - [`teleport_auth_preference`](./resources/auth_preference.mdx)
-  - [`teleport_bot`](./resources/bot.mdx)
-  - [`teleport_cluster_maintenance_config`](./resources/cluster_maintenance_config.mdx)
-  - [`teleport_cluster_networking_config`](./resources/cluster_networking_config.mdx)
-  - [`teleport_database`](./resources/database.mdx)
-  - [`teleport_github_connector`](./resources/github_connector.mdx)
-  - [`teleport_installer`](./resources/installer.mdx)
-  - [`teleport_login_rule`](./resources/login_rule.mdx)
-  - [`teleport_oidc_connector`](./resources/oidc_connector.mdx)
-  - [`teleport_okta_import_rule`](./resources/okta_import_rule.mdx)
-  - [`teleport_provision_token`](./resources/provision_token.mdx)
-  - [`teleport_role`](./resources/role.mdx)
-  - [`teleport_saml_connector`](./resources/saml_connector.mdx)
-  - [`teleport_server`](./resources/server.mdx)
-  - [`teleport_session_recording_config`](./resources/session_recording_config.mdx)
-  - [`teleport_static_host_user`](./resources/static_host_user.mdx)
-  - [`teleport_trusted_cluster`](./resources/trusted_cluster.mdx)
-  - [`teleport_trusted_device`](./resources/trusted_device.mdx)
-  - [`teleport_user`](./resources/user.mdx)
diff --git a/docs/pages/reference/terraform-provider/resources/resources.mdx b/docs/pages/reference/terraform-provider/resources/resources.mdx
new file mode 100644
index 0000000000000..ac150d8a43048
--- /dev/null
+++ b/docs/pages/reference/terraform-provider/resources/resources.mdx
@@ -0,0 +1,37 @@
+---
+title: "Terraform resources index"
+description: "Index of all the resources supported by the Teleport Terraform Provider"
+---
+
+{/*Auto-generated file. Do not edit.*/}
+{/*To regenerate, navigate to integrations/terraform and run `make docs`.*/}
+
+{/*
+    This file will be renamed resources.mdx during build time.
+    The template name is reserved by tfplugindocs so we suffix with -index.
+*/}
+
+The Teleport Terraform provider supports the following resources:
+
+  - [`teleport_access_list`](./access_list.mdx)
+  - [`teleport_access_monitoring_rule`](./access_monitoring_rule.mdx)
+  - [`teleport_app`](./app.mdx)
+  - [`teleport_auth_preference`](./auth_preference.mdx)
+  - [`teleport_bot`](./bot.mdx)
+  - [`teleport_cluster_maintenance_config`](./cluster_maintenance_config.mdx)
+  - [`teleport_cluster_networking_config`](./cluster_networking_config.mdx)
+  - [`teleport_database`](./database.mdx)
+  - [`teleport_github_connector`](./github_connector.mdx)
+  - [`teleport_installer`](./installer.mdx)
+  - [`teleport_login_rule`](./login_rule.mdx)
+  - [`teleport_oidc_connector`](./oidc_connector.mdx)
+  - [`teleport_okta_import_rule`](./okta_import_rule.mdx)
+  - [`teleport_provision_token`](./provision_token.mdx)
+  - [`teleport_role`](./role.mdx)
+  - [`teleport_saml_connector`](./saml_connector.mdx)
+  - [`teleport_server`](./server.mdx)
+  - [`teleport_session_recording_config`](./session_recording_config.mdx)
+  - [`teleport_static_host_user`](./static_host_user.mdx)
+  - [`teleport_trusted_cluster`](./trusted_cluster.mdx)
+  - [`teleport_trusted_device`](./trusted_device.mdx)
+  - [`teleport_user`](./user.mdx)
diff --git a/docs/pages/reference/terraform-provider.mdx b/docs/pages/reference/terraform-provider/terraform-provider.mdx
similarity index 84%
rename from docs/pages/reference/terraform-provider.mdx
rename to docs/pages/reference/terraform-provider/terraform-provider.mdx
index 0c959e49ff397..d1a84f5b694aa 100644
--- a/docs/pages/reference/terraform-provider.mdx
+++ b/docs/pages/reference/terraform-provider/terraform-provider.mdx
@@ -14,10 +14,10 @@ It lists all the supported resources and their fields.
 
 <Admonition type="tip">
 To get started with the Terraform provider, you must start with [the installation
-guide](../admin-guides/infrastructure-as-code/terraform-provider/terraform-provider.mdx).
+guide](../../admin-guides/infrastructure-as-code/terraform-provider/terraform-provider.mdx).
 Once you got a working provider, we recommend you to follow the
 ["Managing users and roles with IaC"](
-../admin-guides/infrastructure-as-code/managing-resources/user-and-role.mdx) guide.
+../../admin-guides/infrastructure-as-code/managing-resources/user-and-role.mdx) guide.
 </Admonition>
 
 The provider exposes Teleport resources both as Terraform data-sources and Terraform resources.
@@ -27,8 +27,8 @@ to create resources in Teleport.
 {/* Note: the awkward `resource-index` file names are here because `data-sources`
 is reserved by the generator for the catch-all resource template */}
 
-- [list of supported resources](./terraform-provider/resources.mdx)
-- [list of supported data-sources](./terraform-provider/data-sources.mdx)
+- [list of supported resources](./resources/resources.mdx)
+- [list of supported data-sources](./data-sources/data-sources.mdx)
 
 ## Example Usage
 
@@ -81,7 +81,7 @@ provider "teleport" {
 This section lists the different ways of passing credentials to the Terraform provider.
 You can find which method fits your use case in
 the [Teleport Terraform provider setup
-page](../admin-guides/infrastructure-as-code/terraform-provider/terraform-provider.mdx)
+page](../../admin-guides/infrastructure-as-code/terraform-provider/terraform-provider.mdx)
 
 ### With an identity file
 
@@ -108,16 +108,16 @@ Detected security key tap
 ```
 
 You can find more information in
-the ["Run the Terraform provider locally" guide](../admin-guides/infrastructure-as-code/terraform-provider/local.mdx)
+the ["Run the Terraform provider locally" guide](../../admin-guides/infrastructure-as-code/terraform-provider/local.mdx)
 
 #### Obtaining an identity file via `tbot`
 
-`tbot` relies on [MachineID](../enroll-resources/machine-id/introduction.mdx) to obtain and automatically renew
+`tbot` relies on [MachineID](../../enroll-resources/machine-id/introduction.mdx) to obtain and automatically renew
 short-lived credentials. Such credentials are harder to exfiltrate, and you can control more precisely who has access to
 which roles (e.g. you can allow only GitHub Actions pipelines targeting the `prod` environment to get certificates).
 
 You can follow [the Terraform Provider
-guide](../admin-guides/infrastructure-as-code/terraform-provider/terraform-provider.mdx) to setup `tbot`
+guide](../../admin-guides/infrastructure-as-code/terraform-provider/terraform-provider.mdx) to setup `tbot`
 and have Terraform use its identity.
 
 #### Obtaining an identity file via `tctl auth sign`
@@ -132,7 +132,7 @@ This auth method has the following limitations:
 - Such credentials are high-privileged and long-lived. They must be protected and rotated.
 - This auth method does not work against Teleport clusters with MFA set to `webauthn`.
   On such clusters, Teleport will reject any long-lived certificate and require
-  [an additional MFA challenge for administrative actions](../admin-guides/access-controls/guides/mfa-for-admin-actions.mdx).
+  [an additional MFA challenge for administrative actions](../../admin-guides/access-controls/guides/mfa-for-admin-actions.mdx).
 
 ### With a token (native MachineID)
 
@@ -140,11 +140,11 @@ Starting with 16.2, the Teleport Terraform provider can natively use MachineID (
 cluster. The Terraform Provider will rely on its runtime (AWS, GCP, Kubernetes, CI/CD system) to prove its identity to
 Teleport.
 
-You can use any [delegated join method](./join-methods.mdx#delegated-join-methods) by setting
+You can use any [delegated join method](../join-methods.mdx#delegated-join-methods) by setting
 both `join_method` and `join_token` in the provider configuration.
 
 This setup is described in more details in
-the ["Run the Teleport Terraform provider in CI or Cloud" guide](../admin-guides/infrastructure-as-code/terraform-provider/ci-or-cloud.mdx).
+the ["Run the Teleport Terraform provider in CI or Cloud" guide](../../admin-guides/infrastructure-as-code/terraform-provider/ci-or-cloud.mdx).
 
 ### With key, certificate, and CA certificate
 
@@ -160,7 +160,7 @@ This auth method has the following limitations:
 - Such credentials are high-privileged and long-lived. They must be protected and rotated.
 - This auth method does not work against Teleport clusters with MFA set to `webauthn`.
   On such clusters, Teleport will reject any long-lived certificate and require
-  [an additional MFA challenge for administrative actions](../admin-guides/access-controls/guides/mfa-for-admin-actions.mdx).
+  [an additional MFA challenge for administrative actions](../../admin-guides/access-controls/guides/mfa-for-admin-actions.mdx).
 
 {/*  schema generated by tfplugindocs */}
 ## Schema
@@ -175,8 +175,8 @@ This auth method has the following limitations:
 - `identity_file` (String, Sensitive) Teleport identity file content. This can also be set with the environment variable `TF_TELEPORT_IDENTITY_FILE`.
 - `identity_file_base64` (String, Sensitive) Teleport identity file content base64 encoded. This can also be set with the environment variable `TF_TELEPORT_IDENTITY_FILE_BASE64`.
 - `identity_file_path` (String) Teleport identity file path. This can also be set with the environment variable `TF_TELEPORT_IDENTITY_FILE_PATH`.
-- `join_method` (String) Enables the native Terraform MachineID support. When set, Terraform uses MachineID to securely join the Teleport cluster and obtain credentials. See [the join method reference](./join-methods.mdx) for possible values, you must use [a delegated join method](./join-methods.mdx#secret-vs-delegated). This can also be set with the environment variable `TF_TELEPORT_JOIN_METHOD`.
-- `join_token` (String) Name of the token used for the native MachineID joining. This value is not sensitive for [delegated join methods](./join-methods.mdx#secret-vs-delegated). This can also be set with the environment variable `TF_TELEPORT_JOIN_TOKEN`.
+- `join_method` (String) Enables the native Terraform MachineID support. When set, Terraform uses MachineID to securely join the Teleport cluster and obtain credentials. See [the join method reference](../join-methods.mdx) for possible values. You must use [a delegated join method](../join-methods.mdx#secret-vs-delegated). This can also be set with the environment variable `TF_TELEPORT_JOIN_METHOD`.
+- `join_token` (String) Name of the token used for the native MachineID joining. This value is not sensitive for [delegated join methods](../join-methods.mdx#secret-vs-delegated). This can also be set with the environment variable `TF_TELEPORT_JOIN_TOKEN`.
 - `key_base64` (String, Sensitive) Base64 encoded TLS auth key. This can also be set with the environment variable `TF_TELEPORT_KEY_BASE64`.
 - `key_path` (String) Path to Teleport auth key file. This can also be set with the environment variable `TF_TELEPORT_KEY`.
 - `profile_dir` (String) Teleport profile path. This can also be set with the environment variable `TF_TELEPORT_PROFILE_PATH`.
diff --git a/integrations/terraform/gen/docs.sh b/integrations/terraform/gen/docs.sh
index eba48091d57ce..f6570db4b41a3 100755
--- a/integrations/terraform/gen/docs.sh
+++ b/integrations/terraform/gen/docs.sh
@@ -67,15 +67,15 @@ info "Converting .md files to .mdx"
 cd "$TMPDIR/docs"
 find . -iname '*.md' -type f -exec sh -c 'i="$1"; mv "$i" "${i%.md}.mdx"' shell {} \;
 # renaming the resources and data-sources indexes because the names were reserved by the generator
-mv "$TMPDIR/docs/resources-index.mdx" "$TMPDIR/docs/resources.mdx"
-mv "$TMPDIR/docs/data-sources-index.mdx" "$TMPDIR/docs/data-sources.mdx"
+mv "$TMPDIR/docs/resources-index.mdx" "$TMPDIR/docs/resources/resources.mdx"
+mv "$TMPDIR/docs/data-sources-index.mdx" "$TMPDIR/docs/data-sources/data-sources.mdx"
 
 info "Copying generated documentation into the teleport docs directory"
 
 # Removing the apex terraform.mdx
-rm -rf "$DOCSDIR" "$DOCSDIR.mdx"
+rm -rf "$DOCSDIR" "$DOCSDIR/terraform-provider.mdx"
 cp -r "$TMPDIR/docs" "$DOCSDIR"
 # unpacking the index to the apex terraform.mdx
-mv "$DOCSDIR/index.mdx" "$DOCSDIR.mdx"
+mv "$DOCSDIR/index.mdx" "$DOCSDIR/terraform-provider.mdx"
 
-info "TF documentation successfully generated"
\ No newline at end of file
+info "TF documentation successfully generated"
diff --git a/integrations/terraform/provider/provider.go b/integrations/terraform/provider/provider.go
index dfc0d9b9a14c3..1f1a923a60c91 100644
--- a/integrations/terraform/provider/provider.go
+++ b/integrations/terraform/provider/provider.go
@@ -247,13 +247,13 @@ func (p *Provider) GetSchema(_ context.Context) (tfsdk.Schema, diag.Diagnostics)
 				Type:        types.StringType,
 				Sensitive:   false,
 				Optional:    true,
-				Description: fmt.Sprintf("Enables the native Terraform MachineID support. When set, Terraform uses MachineID to securely join the Teleport cluster and obtain credentials. See [the join method reference](./join-methods.mdx) for possible values, you must use [a delegated join method](./join-methods.mdx#secret-vs-delegated). This can also be set with the environment variable `%s`.", constants.EnvVarTerraformJoinMethod),
+				Description: fmt.Sprintf("Enables the native Terraform MachineID support. When set, Terraform uses MachineID to securely join the Teleport cluster and obtain credentials. See [the join method reference](../join-methods.mdx) for possible values. You must use [a delegated join method](../join-methods.mdx#secret-vs-delegated). This can also be set with the environment variable `%s`.", constants.EnvVarTerraformJoinMethod),
 			},
 			attributeTerraformJoinToken: {
 				Type:        types.StringType,
 				Sensitive:   false,
 				Optional:    true,
-				Description: fmt.Sprintf("Name of the token used for the native MachineID joining. This value is not sensitive for [delegated join methods](./join-methods.mdx#secret-vs-delegated). This can also be set with the environment variable `%s`.", constants.EnvVarTerraformJoinToken),
+				Description: fmt.Sprintf("Name of the token used for the native MachineID joining. This value is not sensitive for [delegated join methods](../join-methods.mdx#secret-vs-delegated). This can also be set with the environment variable `%s`.", constants.EnvVarTerraformJoinToken),
 			},
 			attributeTerraformJoinAudienceTag: {
 				Type:        types.StringType,
diff --git a/integrations/terraform/templates/data-sources-index.mdx.tmpl b/integrations/terraform/templates/data-sources-index.mdx.tmpl
index c4c7b90af7525..9eac755076952 100644
--- a/integrations/terraform/templates/data-sources-index.mdx.tmpl
+++ b/integrations/terraform/templates/data-sources-index.mdx.tmpl
@@ -13,5 +13,5 @@ description: "Index of all the data-sources supported by the Teleport Terraform
 
 The Teleport Terraform provider supports the following data-sources:
 {{ range $key, $value := .DataSourceFiles }}
-  - [`{{$key}}`](./data-sources/{{$value}}.mdx)
+  - [`{{$key}}`](./{{$value}}.mdx)
 {{- end }}
diff --git a/integrations/terraform/templates/index.md.tmpl b/integrations/terraform/templates/index.md.tmpl
index 15bc1c7c81fa5..488665209f78a 100644
--- a/integrations/terraform/templates/index.md.tmpl
+++ b/integrations/terraform/templates/index.md.tmpl
@@ -14,10 +14,10 @@ It lists all the supported resources and their fields.
 
 <Admonition type="tip">
 To get started with the Terraform provider, you must start with [the installation
-guide](../admin-guides/infrastructure-as-code/terraform-provider/terraform-provider.mdx).
+guide](../../admin-guides/infrastructure-as-code/terraform-provider/terraform-provider.mdx).
 Once you got a working provider, we recommend you to follow the
 ["Managing users and roles with IaC"](
-../admin-guides/infrastructure-as-code/managing-resources/user-and-role.mdx) guide.
+../../admin-guides/infrastructure-as-code/managing-resources/user-and-role.mdx) guide.
 </Admonition>
 
 The provider exposes Teleport resources both as Terraform data-sources and Terraform resources.
@@ -27,8 +27,8 @@ to create resources in Teleport.
 {/* Note: the awkward `resource-index` file names are here because `data-sources`
 is reserved by the generator for the catch-all resource template */}
 
-- [list of supported resources](./terraform-provider/resources.mdx)
-- [list of supported data-sources](./terraform-provider/data-sources.mdx)
+- [list of supported resources](./resources/resources.mdx)
+- [list of supported data-sources](./data-sources/data-sources.mdx)
 
 ## Example Usage
 
@@ -81,7 +81,7 @@ provider "teleport" {
 This section lists the different ways of passing credentials to the Terraform provider.
 You can find which method fits your use case in
 the [Teleport Terraform provider setup
-page](../admin-guides/infrastructure-as-code/terraform-provider/terraform-provider.mdx)
+page](../../admin-guides/infrastructure-as-code/terraform-provider/terraform-provider.mdx)
 
 ### With an identity file
 
@@ -108,16 +108,16 @@ Detected security key tap
 ```
 
 You can find more information in
-the ["Run the Terraform provider locally" guide](../admin-guides/infrastructure-as-code/terraform-provider/local.mdx)
+the ["Run the Terraform provider locally" guide](../../admin-guides/infrastructure-as-code/terraform-provider/local.mdx)
 
 #### Obtaining an identity file via `tbot`
 
-`tbot` relies on [MachineID](../enroll-resources/machine-id/introduction.mdx) to obtain and automatically renew
+`tbot` relies on [MachineID](../../enroll-resources/machine-id/introduction.mdx) to obtain and automatically renew
 short-lived credentials. Such credentials are harder to exfiltrate, and you can control more precisely who has access to
 which roles (e.g. you can allow only GitHub Actions pipelines targeting the `prod` environment to get certificates).
 
 You can follow [the Terraform Provider
-guide](../admin-guides/infrastructure-as-code/terraform-provider/terraform-provider.mdx) to setup `tbot`
+guide](../../admin-guides/infrastructure-as-code/terraform-provider/terraform-provider.mdx) to set up `tbot`
 and have Terraform use its identity.
 
 #### Obtaining an identity file via `tctl auth sign`
@@ -132,7 +132,7 @@ This auth method has the following limitations:
 - Such credentials are high-privileged and long-lived. They must be protected and rotated.
 - This auth method does not work against Teleport clusters with MFA set to `webauthn`.
   On such clusters, Teleport will reject any long-lived certificate and require
-  [an additional MFA challenge for administrative actions](../admin-guides/access-controls/guides/mfa-for-admin-actions.mdx).
+  [an additional MFA challenge for administrative actions](../../admin-guides/access-controls/guides/mfa-for-admin-actions.mdx).
 
 ### With a token (native MachineID)
 
@@ -140,11 +140,11 @@ Starting with 16.2, the Teleport Terraform provider can natively use MachineID (
 cluster. The Terraform Provider will rely on its runtime (AWS, GCP, Kubernetes, CI/CD system) to prove its identity to
 Teleport.
 
-You can use any [delegated join method](./join-methods.mdx#delegated-join-methods) by setting
+You can use any [delegated join method](../join-methods.mdx#delegated-join-methods) by setting
 both `join_method` and `join_token` in the provider configuration.
 
 This setup is described in more details in
-the ["Run the Teleport Terraform provider in CI or Cloud" guide](../admin-guides/infrastructure-as-code/terraform-provider/ci-or-cloud.mdx).
+the ["Run the Teleport Terraform provider in CI or Cloud" guide](../../admin-guides/infrastructure-as-code/terraform-provider/ci-or-cloud.mdx).
 
 ### With key, certificate, and CA certificate
 
@@ -160,7 +160,7 @@ This auth method has the following limitations:
 - Such credentials are high-privileged and long-lived. They must be protected and rotated.
 - This auth method does not work against Teleport clusters with MFA set to `webauthn`.
   On such clusters, Teleport will reject any long-lived certificate and require
-  [an additional MFA challenge for administrative actions](../admin-guides/access-controls/guides/mfa-for-admin-actions.mdx).
+  [an additional MFA challenge for administrative actions](../../admin-guides/access-controls/guides/mfa-for-admin-actions.mdx).
 
 {{ .SchemaMarkdown | trimspace }}
 
diff --git a/integrations/terraform/templates/resources-index.mdx.tmpl b/integrations/terraform/templates/resources-index.mdx.tmpl
index 42f5821dfbca9..00167441cf03b 100644
--- a/integrations/terraform/templates/resources-index.mdx.tmpl
+++ b/integrations/terraform/templates/resources-index.mdx.tmpl
@@ -13,5 +13,5 @@ description: "Index of all the datasources supported by the Teleport Terraform P
 
 The Teleport Terraform provider supports the following resources:
 {{ range $key, $value := .ResourceFiles }}
-  - [`{{$key}}`](./resources/{{$value}}.mdx)
+  - [`{{$key}}`](./{{$value}}.mdx)
 {{- end }}

From 690a0bdd7f5878843cb16a21304adc5232be36c1 Mon Sep 17 00:00:00 2001
From: Zac Bergquist <zac.bergquist@goteleport.com>
Date: Tue, 29 Oct 2024 08:36:05 -0600
Subject: [PATCH 02/13] Fix racy X11 forwarding test (#48045)

{require,assert}.Eventually run the passed-in func on a new goroutine
for each attempt, so it's not safe to write to a shared variable
without any synchronization.

Closes #47756
---
 integration/integration_test.go | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/integration/integration_test.go b/integration/integration_test.go
index bad3f826bd7f8..f49dfb06f5e0c 100644
--- a/integration/integration_test.go
+++ b/integration/integration_test.go
@@ -4740,7 +4740,7 @@ func testX11Forwarding(t *testing.T, suite *integrationTestSuite) {
 
 						// Reading the display may fail if the session is not fully initialized
 						// and the write to stdin is swallowed.
-						var display string
+						display := make(chan string, 1)
 						require.EventuallyWithT(t, func(t *assert.CollectT) {
 							// enter 'printenv DISPLAY > /path/to/tmp/file' into the session (dumping the value of DISPLAY into the temp file)
 							_, err = keyboard.Write([]byte(fmt.Sprintf("printenv %v > %s\n\r", x11.DisplayEnv, tmpFile.Name())))
@@ -4749,7 +4749,7 @@ func testX11Forwarding(t *testing.T, suite *integrationTestSuite) {
 							assert.Eventually(t, func() bool {
 								output, err := os.ReadFile(tmpFile.Name())
 								if err == nil && len(output) != 0 {
-									display = strings.TrimSpace(string(output))
+									display <- strings.TrimSpace(string(output))
 									return true
 								}
 								return false
@@ -4757,7 +4757,7 @@ func testX11Forwarding(t *testing.T, suite *integrationTestSuite) {
 						}, 10*time.Second, time.Second)
 
 						// Make a new connection to the XServer proxy to confirm that forwarding is working.
-						serverDisplay, err := x11.ParseDisplay(display)
+						serverDisplay, err := x11.ParseDisplay(<-display)
 						require.NoError(t, err)
 
 						conn, err := serverDisplay.Dial()

From 074f80646e7111fbb9ebea780291319711dcf066 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marek=20Smoli=C5=84ski?= <marek@goteleport.com>
Date: Tue, 29 Oct 2024 15:48:55 +0100
Subject: [PATCH 03/13] Update e ref (#48067)

---
 e | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/e b/e
index c8b2aed1f1c9d..b23222d7c1c5a 160000
--- a/e
+++ b/e
@@ -1 +1 @@
-Subproject commit c8b2aed1f1c9d059e8853163486214778dcb08b0
+Subproject commit b23222d7c1c5a747f41a95fb98d15e0073f7cd99

From 1807dfdf527e61d293ca05153fac153876c3baaf Mon Sep 17 00:00:00 2001
From: Hugo Shaka <hugo.hervieux@goteleport.com>
Date: Tue, 29 Oct 2024 11:11:29 -0400
Subject: [PATCH 04/13] Implement immediate schedule support for automatic
 updates (#47920)

* Implement immediate schedule support

* expose edition, fips, and ensure ping endpoint answers

* fix after rebase

* fix cache tests
---
 api/client/webclient/webclient.go    |  10 ++
 api/types/autoupdate/rollout_test.go |  12 +--
 api/types/autoupdate/utils.go        |   4 +-
 api/types/autoupdate/version_test.go |  12 +--
 lib/cache/cache_test.go              |   6 +-
 lib/web/apiserver.go                 | 139 +++++++++++++++++++--------
 lib/web/apiserver_ping_test.go       | 100 ++++++++++++++++---
 7 files changed, 209 insertions(+), 74 deletions(-)

diff --git a/api/client/webclient/webclient.go b/api/client/webclient/webclient.go
index 95ae0ea9747c3..f3b6ba5586768 100644
--- a/api/client/webclient/webclient.go
+++ b/api/client/webclient/webclient.go
@@ -305,6 +305,10 @@ type PingResponse struct {
 	// reserved: license_warnings ([]string)
 	// AutomaticUpgrades describes whether agents should automatically upgrade.
 	AutomaticUpgrades bool `json:"automatic_upgrades"`
+	// Edition represents the Teleport edition. Possible values are "oss", "ent", and "community".
+	Edition string `json:"edition"`
+	// FIPS represents if Teleport is using FIPS-compliant cryptography.
+	FIPS bool `json:"fips"`
 }
 
 // PingErrorResponse contains the error from /webapi/ping.
@@ -336,6 +340,12 @@ type AutoUpdateSettings struct {
 	ToolsVersion string `json:"tools_version"`
 	// ToolsMode defines mode client auto update feature `enabled|disabled`.
 	ToolsMode string `json:"tools_mode"`
+	// AgentVersion defines the version of Teleport that agents enrolled into autoupdates should run.
+	AgentVersion string `json:"agent_version"`
+	// AgentAutoUpdate indicates if the requesting agent should attempt to update now.
+	AgentAutoUpdate bool `json:"agent_auto_update"`
+	// AgentUpdateJitterSeconds defines the jitter time an agent should wait before updating.
+	AgentUpdateJitterSeconds int `json:"agent_update_jitter_seconds"`
 }
 
 // KubeProxySettings is kubernetes proxy settings
diff --git a/api/types/autoupdate/rollout_test.go b/api/types/autoupdate/rollout_test.go
index cce4dc8495d83..66c1b705d1568 100644
--- a/api/types/autoupdate/rollout_test.go
+++ b/api/types/autoupdate/rollout_test.go
@@ -41,7 +41,7 @@ func TestNewAutoUpdateAgentRollout(t *testing.T) {
 			spec: &autoupdate.AutoUpdateAgentRolloutSpec{
 				StartVersion:   "1.2.3",
 				TargetVersion:  "2.3.4-dev",
-				Schedule:       AgentsScheduleRegular,
+				Schedule:       AgentsScheduleImmediate,
 				AutoupdateMode: AgentsUpdateModeEnabled,
 				Strategy:       AgentsStrategyHaltOnError,
 			},
@@ -57,7 +57,7 @@ func TestNewAutoUpdateAgentRollout(t *testing.T) {
 				Spec: &autoupdate.AutoUpdateAgentRolloutSpec{
 					StartVersion:   "1.2.3",
 					TargetVersion:  "2.3.4-dev",
-					Schedule:       AgentsScheduleRegular,
+					Schedule:       AgentsScheduleImmediate,
 					AutoupdateMode: AgentsUpdateModeEnabled,
 					Strategy:       AgentsStrategyHaltOnError,
 				},
@@ -74,7 +74,7 @@ func TestNewAutoUpdateAgentRollout(t *testing.T) {
 			name: "missing start version",
 			spec: &autoupdate.AutoUpdateAgentRolloutSpec{
 				TargetVersion:  "2.3.4-dev",
-				Schedule:       AgentsScheduleRegular,
+				Schedule:       AgentsScheduleImmediate,
 				AutoupdateMode: AgentsUpdateModeEnabled,
 				Strategy:       AgentsStrategyHaltOnError,
 			},
@@ -87,7 +87,7 @@ func TestNewAutoUpdateAgentRollout(t *testing.T) {
 			spec: &autoupdate.AutoUpdateAgentRolloutSpec{
 				StartVersion:   "1.2.3",
 				TargetVersion:  "2-3-4",
-				Schedule:       AgentsScheduleRegular,
+				Schedule:       AgentsScheduleImmediate,
 				AutoupdateMode: AgentsUpdateModeEnabled,
 				Strategy:       AgentsStrategyHaltOnError,
 			},
@@ -100,7 +100,7 @@ func TestNewAutoUpdateAgentRollout(t *testing.T) {
 			spec: &autoupdate.AutoUpdateAgentRolloutSpec{
 				StartVersion:   "1.2.3",
 				TargetVersion:  "2.3.4-dev",
-				Schedule:       AgentsScheduleRegular,
+				Schedule:       AgentsScheduleImmediate,
 				AutoupdateMode: "invalid-mode",
 				Strategy:       AgentsStrategyHaltOnError,
 			},
@@ -126,7 +126,7 @@ func TestNewAutoUpdateAgentRollout(t *testing.T) {
 			spec: &autoupdate.AutoUpdateAgentRolloutSpec{
 				StartVersion:   "1.2.3",
 				TargetVersion:  "2.3.4-dev",
-				Schedule:       AgentsScheduleRegular,
+				Schedule:       AgentsScheduleImmediate,
 				AutoupdateMode: AgentsUpdateModeEnabled,
 				Strategy:       "invalid-strategy",
 			},
diff --git a/api/types/autoupdate/utils.go b/api/types/autoupdate/utils.go
index 4772ff8a94411..30658c80d71ec 100644
--- a/api/types/autoupdate/utils.go
+++ b/api/types/autoupdate/utils.go
@@ -51,8 +51,10 @@ func checkToolsMode(mode string) error {
 
 func checkScheduleName(schedule string) error {
 	switch schedule {
-	case AgentsScheduleRegular, AgentsScheduleImmediate:
+	case AgentsScheduleImmediate:
 		return nil
+	case AgentsScheduleRegular:
+		return trace.BadParameter("regular schedule is not implemented yet")
 	default:
 		return trace.BadParameter("unsupported schedule type: %q", schedule)
 	}
diff --git a/api/types/autoupdate/version_test.go b/api/types/autoupdate/version_test.go
index a59a4f6fe6c22..793d7d6a2a145 100644
--- a/api/types/autoupdate/version_test.go
+++ b/api/types/autoupdate/version_test.go
@@ -94,7 +94,7 @@ func TestNewAutoUpdateVersion(t *testing.T) {
 				Agents: &autoupdate.AutoUpdateVersionSpecAgents{
 					StartVersion:  "1.2.3-dev.1",
 					TargetVersion: "1.2.3-dev.2",
-					Schedule:      AgentsScheduleRegular,
+					Schedule:      AgentsScheduleImmediate,
 					Mode:          AgentsUpdateModeEnabled,
 				},
 			},
@@ -111,7 +111,7 @@ func TestNewAutoUpdateVersion(t *testing.T) {
 					Agents: &autoupdate.AutoUpdateVersionSpecAgents{
 						StartVersion:  "1.2.3-dev.1",
 						TargetVersion: "1.2.3-dev.2",
-						Schedule:      AgentsScheduleRegular,
+						Schedule:      AgentsScheduleImmediate,
 						Mode:          AgentsUpdateModeEnabled,
 					},
 				},
@@ -124,7 +124,7 @@ func TestNewAutoUpdateVersion(t *testing.T) {
 					StartVersion:  "",
 					TargetVersion: "1.2.3",
 					Mode:          AgentsUpdateModeEnabled,
-					Schedule:      AgentsScheduleRegular,
+					Schedule:      AgentsScheduleImmediate,
 				},
 			},
 			assertErr: func(t *testing.T, err error, a ...any) {
@@ -138,7 +138,7 @@ func TestNewAutoUpdateVersion(t *testing.T) {
 					StartVersion:  "1.2.3-dev",
 					TargetVersion: "",
 					Mode:          AgentsUpdateModeEnabled,
-					Schedule:      AgentsScheduleRegular,
+					Schedule:      AgentsScheduleImmediate,
 				},
 			},
 			assertErr: func(t *testing.T, err error, a ...any) {
@@ -152,7 +152,7 @@ func TestNewAutoUpdateVersion(t *testing.T) {
 					StartVersion:  "17-0-0",
 					TargetVersion: "1.2.3",
 					Mode:          AgentsUpdateModeEnabled,
-					Schedule:      AgentsScheduleRegular,
+					Schedule:      AgentsScheduleImmediate,
 				},
 			},
 			assertErr: func(t *testing.T, err error, a ...any) {
@@ -166,7 +166,7 @@ func TestNewAutoUpdateVersion(t *testing.T) {
 					StartVersion:  "1.2.3",
 					TargetVersion: "17-0-0",
 					Mode:          AgentsUpdateModeEnabled,
-					Schedule:      AgentsScheduleRegular,
+					Schedule:      AgentsScheduleImmediate,
 				},
 			},
 			assertErr: func(t *testing.T, err error, a ...any) {
diff --git a/lib/cache/cache_test.go b/lib/cache/cache_test.go
index 9c11b4f3a1145..f74ca9eeef151 100644
--- a/lib/cache/cache_test.go
+++ b/lib/cache/cache_test.go
@@ -4083,9 +4083,9 @@ func newAutoUpdateAgentRollout(t *testing.T) *autoupdate.AutoUpdateAgentRollout
 	r, err := update.NewAutoUpdateAgentRollout(&autoupdate.AutoUpdateAgentRolloutSpec{
 		StartVersion:   "1.2.3",
 		TargetVersion:  "2.3.4",
-		Schedule:       "regular",
-		AutoupdateMode: "enabled",
-		Strategy:       "time-based",
+		Schedule:       update.AgentsScheduleImmediate,
+		AutoupdateMode: update.AgentsUpdateModeEnabled,
+		Strategy:       update.AgentsStrategyTimeBased,
 	})
 	require.NoError(t, err)
 	return r
diff --git a/lib/web/apiserver.go b/lib/web/apiserver.go
index de4d085377c3c..2bbb2bcbdd561 100644
--- a/lib/web/apiserver.go
+++ b/lib/web/apiserver.go
@@ -65,6 +65,7 @@ import (
 	"github.com/gravitational/teleport/api/client/webclient"
 	"github.com/gravitational/teleport/api/constants"
 	apidefaults "github.com/gravitational/teleport/api/defaults"
+	autoupdatepb "github.com/gravitational/teleport/api/gen/proto/go/teleport/autoupdate/v1"
 	mfav1 "github.com/gravitational/teleport/api/gen/proto/go/teleport/mfa/v1"
 	notificationsv1 "github.com/gravitational/teleport/api/gen/proto/go/teleport/notifications/v1"
 	"github.com/gravitational/teleport/api/mfa"
@@ -135,6 +136,8 @@ const (
 	// This cache is here to protect against accidental or intentional DDoS, the TTL must be low to quickly reflect
 	// cluster configuration changes.
 	findEndpointCacheTTL = 10 * time.Second
+	// DefaultAgentUpdateJitterSeconds is the default jitter time, in seconds, that agents should wait before updating.
+	DefaultAgentUpdateJitterSeconds = 60
 )
 
 // healthCheckAppServerFunc defines a function used to perform a health check
@@ -1539,69 +1542,65 @@ func (h *Handler) ping(w http.ResponseWriter, r *http.Request, p httprouter.Para
 		MinClientVersion:  teleport.MinClientVersion,
 		ClusterName:       h.auth.clusterName,
 		AutomaticUpgrades: pr.ServerFeatures.GetAutomaticUpgrades(),
+		AutoUpdate:        h.automaticUpdateSettings(r.Context()),
+		Edition:           modules.GetModules().BuildType(),
+		FIPS:              modules.IsBoringBinary(),
 	}, nil
 }
 
 func (h *Handler) find(w http.ResponseWriter, r *http.Request, p httprouter.Params) (interface{}, error) {
 	// cache the generic answer to avoid doing work for each request
 	resp, err := utils.FnCacheGet[*webclient.PingResponse](r.Context(), h.findEndpointCache, "find", func(ctx context.Context) (*webclient.PingResponse, error) {
-		response := webclient.PingResponse{
-			ServerVersion:    teleport.Version,
-			MinClientVersion: teleport.MinClientVersion,
-			ClusterName:      h.auth.clusterName,
-		}
-
-		proxyConfig, err := h.cfg.ProxySettings.GetProxySettings(r.Context())
+		proxyConfig, err := h.cfg.ProxySettings.GetProxySettings(ctx)
 		if err != nil {
 			return nil, trace.Wrap(err)
 		}
-		response.Proxy = *proxyConfig
 
-		authPref, err := h.cfg.AccessPoint.GetAuthPreference(r.Context())
+		authPref, err := h.cfg.AccessPoint.GetAuthPreference(ctx)
 		if err != nil {
 			return nil, trace.Wrap(err)
 		}
-		response.Auth = webclient.AuthenticationSettings{SignatureAlgorithmSuite: authPref.GetSignatureAlgorithmSuite()}
-
-		autoUpdateConfig, err := h.cfg.AccessPoint.GetAutoUpdateConfig(r.Context())
-		// TODO(vapopov) DELETE IN v18.0.0 check of IsNotImplemented, must be backported to all latest supported versions.
-		if err != nil && !trace.IsNotFound(err) && !trace.IsNotImplemented(err) {
-			h.logger.ErrorContext(r.Context(), "failed to receive AutoUpdateConfig", "error", err)
-		}
-		// If we can't get the AU config or tools AU are not configured, we default to "disabled".
-		// This ensures we fail open and don't accidentally update agents if something is going wrong.
-		// If we want to enable AUs by default, it would be better to create a default "autoupdate_config" resource
-		// than changing this logic.
-		if autoUpdateConfig.GetSpec().GetTools() == nil {
-			response.AutoUpdate.ToolsMode = autoupdate.ToolsUpdateModeDisabled
-		} else {
-			response.AutoUpdate.ToolsMode = autoUpdateConfig.GetSpec().GetTools().GetMode()
-		}
-
-		autoUpdateVersion, err := h.cfg.AccessPoint.GetAutoUpdateVersion(r.Context())
-		// TODO(vapopov) DELETE IN v18.0.0 check of IsNotImplemented, must be backported to all latest supported versions.
-		if err != nil && !trace.IsNotFound(err) && !trace.IsNotImplemented(err) {
-			h.logger.ErrorContext(r.Context(), "failed to receive AutoUpdateVersion", "error", err)
-		}
-		// If we can't get the AU version or tools AU version is not specified, we default to the current proxy version.
-		// This ensures we always advertise a version compatible with the cluster.
-		if autoUpdateVersion.GetSpec().GetTools() == nil {
-			response.AutoUpdate.ToolsVersion = api.Version
-		} else {
-			response.AutoUpdate.ToolsVersion = autoUpdateVersion.GetSpec().GetTools().GetTargetVersion()
-		}
 
-		return &response, nil
+		return &webclient.PingResponse{
+			Proxy:            *proxyConfig,
+			Auth:             webclient.AuthenticationSettings{SignatureAlgorithmSuite: authPref.GetSignatureAlgorithmSuite()},
+			ServerVersion:    teleport.Version,
+			MinClientVersion: teleport.MinClientVersion,
+			ClusterName:      h.auth.clusterName,
+			Edition:          modules.GetModules().BuildType(),
+			FIPS:             modules.IsBoringBinary(),
+			AutoUpdate:       h.automaticUpdateSettings(ctx),
+		}, nil
 	})
 	if err != nil {
 		return nil, trace.Wrap(err)
 	}
-
-	// If you need to modulate the response based on the request params (will need to do this for automatic updates)
-	// Do it here.
 	return resp, nil
 }
 
+// TODO: add the request as a parameter when we need to modulate the content based on the UUID and group
+func (h *Handler) automaticUpdateSettings(ctx context.Context) webclient.AutoUpdateSettings {
+	autoUpdateConfig, err := h.cfg.AccessPoint.GetAutoUpdateConfig(ctx)
+	// TODO(vapopov) DELETE IN v18.0.0 check of IsNotImplemented, must be backported to all latest supported versions.
+	if err != nil && !trace.IsNotFound(err) && !trace.IsNotImplemented(err) {
+		h.logger.ErrorContext(ctx, "failed to receive AutoUpdateConfig", "error", err)
+	}
+
+	autoUpdateVersion, err := h.cfg.AccessPoint.GetAutoUpdateVersion(ctx)
+	// TODO(vapopov) DELETE IN v18.0.0 check of IsNotImplemented, must be backported to all latest supported versions.
+	if err != nil && !trace.IsNotFound(err) && !trace.IsNotImplemented(err) {
+		h.logger.ErrorContext(ctx, "failed to receive AutoUpdateVersion", "error", err)
+	}
+
+	return webclient.AutoUpdateSettings{
+		ToolsMode:                getToolsMode(autoUpdateConfig),
+		ToolsVersion:             getToolsVersion(autoUpdateVersion),
+		AgentUpdateJitterSeconds: DefaultAgentUpdateJitterSeconds,
+		AgentVersion:             getAgentVersion(autoUpdateVersion),
+		AgentAutoUpdate:          agentShouldUpdate(autoUpdateConfig, autoUpdateVersion),
+	}
+}
+
 func (h *Handler) pingWithConnector(w http.ResponseWriter, r *http.Request, p httprouter.Params) (interface{}, error) {
 	authClient := h.cfg.ProxyClient
 	connectorName := p.ByName("connector")
@@ -5154,3 +5153,59 @@ func readEtagFromAppHash(fs http.FileSystem) (string, error) {
 
 	return etag, nil
 }
+
+func getToolsMode(config *autoupdatepb.AutoUpdateConfig) string {
+	// If we can't get the AU config or if AUs are not configured, we default to "disabled".
+	// This ensures we fail open and don't accidentally update agents if something is going wrong.
+	// If we want to enable AUs by default, it would be better to create a default "autoupdate_config" resource
+	// than changing this logic.
+	if config.GetSpec().GetTools() == nil {
+		return autoupdate.ToolsUpdateModeDisabled
+	}
+	return config.GetSpec().GetTools().GetMode()
+}
+
+func getToolsVersion(version *autoupdatepb.AutoUpdateVersion) string {
+	// If we can't get the AU version or tools AU version is not specified, we default to the current proxy version.
+	// This ensures we always advertise a version compatible with the cluster.
+	if version.GetSpec().GetTools() == nil {
+		return api.Version
+	}
+	return version.GetSpec().GetTools().GetTargetVersion()
+}
+
+func getAgentVersion(version *autoupdatepb.AutoUpdateVersion) string {
+	// If we can't get the AU version or the agents AU version is not specified, we default to the current proxy version.
+	// This ensures we always advertise a version compatible with the cluster.
+	// TODO: read the version from the autoupdate_agent_rollout when the resource is implemented
+	if version.GetSpec().GetAgents() == nil {
+		return api.Version
+	}
+
+	return version.GetSpec().GetAgents().GetTargetVersion()
+}
+
+func agentShouldUpdate(config *autoupdatepb.AutoUpdateConfig, version *autoupdatepb.AutoUpdateVersion) bool {
+	// TODO: read the data from the autoupdate_agent_rollout when the resource is implemented
+
+	// If we can't get the AU config or if AUs are not configured, we default to "disabled".
+	// This ensures we fail open and don't accidentally update agents if something is going wrong.
+	// If we want to enable AUs by default, it would be better to create a default "autoupdate_config" resource
+	// than changing this logic.
+	if config.GetSpec().GetAgents() == nil {
+		return false
+	}
+	if version.GetSpec().GetAgents() == nil {
+		return false
+	}
+	configMode := config.GetSpec().GetAgents().GetMode()
+	versionMode := version.GetSpec().GetAgents().GetMode()
+
+	// We update only if both version and config agent modes are "enabled"
+	if configMode != autoupdate.AgentsUpdateModeEnabled || versionMode != autoupdate.AgentsUpdateModeEnabled {
+		return false
+	}
+
+	scheduleName := version.GetSpec().GetAgents().GetSchedule()
+	return scheduleName == autoupdate.AgentsScheduleImmediate
+}
diff --git a/lib/web/apiserver_ping_test.go b/lib/web/apiserver_ping_test.go
index 231c8625ffacd..5ce3720375c46 100644
--- a/lib/web/apiserver_ping_test.go
+++ b/lib/web/apiserver_ping_test.go
@@ -305,48 +305,110 @@ func TestPing_autoUpdateResources(t *testing.T) {
 		{
 			name: "resources not defined",
 			expected: webclient.AutoUpdateSettings{
-				ToolsVersion: api.Version,
-				ToolsMode:    autoupdate.ToolsUpdateModeDisabled,
+				ToolsVersion:             api.Version,
+				ToolsMode:                autoupdate.ToolsUpdateModeDisabled,
+				AgentUpdateJitterSeconds: DefaultAgentUpdateJitterSeconds,
+				AgentAutoUpdate:          false,
+				AgentVersion:             api.Version,
 			},
 		},
 		{
-			name: "enable auto update",
+			name: "enable tools auto update",
 			config: &autoupdatev1pb.AutoUpdateConfigSpec{
 				Tools: &autoupdatev1pb.AutoUpdateConfigSpecTools{
 					Mode: autoupdate.ToolsUpdateModeEnabled,
 				},
 			},
 			expected: webclient.AutoUpdateSettings{
-				ToolsMode:    autoupdate.ToolsUpdateModeEnabled,
-				ToolsVersion: api.Version,
+				ToolsMode:                autoupdate.ToolsUpdateModeEnabled,
+				ToolsVersion:             api.Version,
+				AgentUpdateJitterSeconds: DefaultAgentUpdateJitterSeconds,
+				AgentAutoUpdate:          false,
+				AgentVersion:             api.Version,
 			},
 			cleanup: true,
 		},
 		{
-			name:    "no autoupdate tool config nor version",
+			name: "enable agent auto update, immediate schedule",
+			config: &autoupdatev1pb.AutoUpdateConfigSpec{
+				Agents: &autoupdatev1pb.AutoUpdateConfigSpecAgents{
+					Mode:     autoupdate.AgentsUpdateModeEnabled,
+					Strategy: autoupdate.AgentsStrategyHaltOnError,
+				},
+			},
+			version: &autoupdatev1pb.AutoUpdateVersionSpec{
+				Agents: &autoupdatev1pb.AutoUpdateVersionSpecAgents{
+					Mode:          autoupdate.AgentsUpdateModeEnabled,
+					StartVersion:  "1.2.3",
+					TargetVersion: "1.2.4",
+					Schedule:      autoupdate.AgentsScheduleImmediate,
+				},
+			},
+			expected: webclient.AutoUpdateSettings{
+				ToolsVersion:             api.Version,
+				ToolsMode:                autoupdate.ToolsUpdateModeDisabled,
+				AgentUpdateJitterSeconds: DefaultAgentUpdateJitterSeconds,
+				AgentAutoUpdate:          true,
+				AgentVersion:             "1.2.4",
+			},
+			cleanup: true,
+		},
+		{
+			name: "version enable agent auto update, but config disables them",
+			config: &autoupdatev1pb.AutoUpdateConfigSpec{
+				Agents: &autoupdatev1pb.AutoUpdateConfigSpecAgents{
+					Mode:     autoupdate.AgentsUpdateModeDisabled,
+					Strategy: autoupdate.AgentsStrategyHaltOnError,
+				},
+			},
+			version: &autoupdatev1pb.AutoUpdateVersionSpec{
+				Agents: &autoupdatev1pb.AutoUpdateVersionSpecAgents{
+					Mode:          autoupdate.AgentsUpdateModeEnabled,
+					StartVersion:  "1.2.3",
+					TargetVersion: "1.2.4",
+					Schedule:      autoupdate.AgentsScheduleImmediate,
+				},
+			},
+			expected: webclient.AutoUpdateSettings{
+				ToolsVersion:             api.Version,
+				ToolsMode:                autoupdate.ToolsUpdateModeDisabled,
+				AgentUpdateJitterSeconds: DefaultAgentUpdateJitterSeconds,
+				AgentAutoUpdate:          false,
+				AgentVersion:             "1.2.4",
+			},
+			cleanup: true,
+		},
+		{
+			name:    "empty config and version",
 			config:  &autoupdatev1pb.AutoUpdateConfigSpec{},
 			version: &autoupdatev1pb.AutoUpdateVersionSpec{},
 			expected: webclient.AutoUpdateSettings{
-				ToolsVersion: api.Version,
-				ToolsMode:    autoupdate.ToolsUpdateModeDisabled,
+				ToolsVersion:             api.Version,
+				ToolsMode:                autoupdate.ToolsUpdateModeDisabled,
+				AgentUpdateJitterSeconds: DefaultAgentUpdateJitterSeconds,
+				AgentAutoUpdate:          false,
+				AgentVersion:             api.Version,
 			},
 			cleanup: true,
 		},
 		{
-			name: "set auto update version",
+			name: "set tools auto update version",
 			version: &autoupdatev1pb.AutoUpdateVersionSpec{
 				Tools: &autoupdatev1pb.AutoUpdateVersionSpecTools{
 					TargetVersion: "1.2.3",
 				},
 			},
 			expected: webclient.AutoUpdateSettings{
-				ToolsVersion: "1.2.3",
-				ToolsMode:    autoupdate.ToolsUpdateModeDisabled,
+				ToolsVersion:             "1.2.3",
+				ToolsMode:                autoupdate.ToolsUpdateModeDisabled,
+				AgentUpdateJitterSeconds: DefaultAgentUpdateJitterSeconds,
+				AgentAutoUpdate:          false,
+				AgentVersion:             api.Version,
 			},
 			cleanup: true,
 		},
 		{
-			name: "enable auto update and set version",
+			name: "enable tools auto update and set version",
 			config: &autoupdatev1pb.AutoUpdateConfigSpec{
 				Tools: &autoupdatev1pb.AutoUpdateConfigSpecTools{
 					Mode: autoupdate.ToolsUpdateModeEnabled,
@@ -358,8 +420,11 @@ func TestPing_autoUpdateResources(t *testing.T) {
 				},
 			},
 			expected: webclient.AutoUpdateSettings{
-				ToolsMode:    autoupdate.ToolsUpdateModeEnabled,
-				ToolsVersion: "1.2.3",
+				ToolsMode:                autoupdate.ToolsUpdateModeEnabled,
+				ToolsVersion:             "1.2.3",
+				AgentUpdateJitterSeconds: DefaultAgentUpdateJitterSeconds,
+				AgentAutoUpdate:          false,
+				AgentVersion:             api.Version,
 			},
 		},
 		{
@@ -375,8 +440,11 @@ func TestPing_autoUpdateResources(t *testing.T) {
 				},
 			},
 			expected: webclient.AutoUpdateSettings{
-				ToolsMode:    autoupdate.ToolsUpdateModeDisabled,
-				ToolsVersion: "3.2.1",
+				ToolsMode:                autoupdate.ToolsUpdateModeDisabled,
+				ToolsVersion:             "3.2.1",
+				AgentUpdateJitterSeconds: DefaultAgentUpdateJitterSeconds,
+				AgentAutoUpdate:          false,
+				AgentVersion:             api.Version,
 			},
 		},
 	}

From 9d4b20ca5d74ef24759e387aeb3ed151c9801a4b Mon Sep 17 00:00:00 2001
From: Tiago Silva <tiago.silva@goteleport.com>
Date: Tue, 29 Oct 2024 15:12:01 +0000
Subject: [PATCH 05/13] [kube] add server_id to targets when monitoring
 exec/portforward connections (#47829)

This PR adds the target server_id (kubernetes service) when the proxy establishes a connection to support kubectl exec and portforward. This allows proxies to terminate the connection early without relying on the upstream to terminate it.
---
 lib/kube/proxy/forwarder.go | 22 +++++++++++++++++-----
 lib/kube/proxy/roundtrip.go |  1 -
 lib/kube/proxy/transport.go | 23 ++++++++++++++++++++---
 3 files changed, 37 insertions(+), 9 deletions(-)

diff --git a/lib/kube/proxy/forwarder.go b/lib/kube/proxy/forwarder.go
index b66665d16b927..729ab913139ff 100644
--- a/lib/kube/proxy/forwarder.go
+++ b/lib/kube/proxy/forwarder.go
@@ -2306,7 +2306,7 @@ func (s *clusterSession) close() {
 	}
 }
 
-func (s *clusterSession) monitorConn(conn net.Conn, err error) (net.Conn, error) {
+func (s *clusterSession) monitorConn(conn net.Conn, err error, hostID string) (net.Conn, error) {
 	if err != nil {
 		return nil, trace.Wrap(err)
 	}
@@ -2321,10 +2321,18 @@ func (s *clusterSession) monitorConn(conn net.Conn, err error) (net.Conn, error)
 		s.connMonitorCancel(err)
 		return nil, trace.Wrap(err)
 	}
-
+	lockTargets := s.LockTargets()
+	// when the target is not a kubernetes_service instance, we don't need to lock it.
+	// the target could be a remote cluster or a local Kubernetes API server. In both cases,
+	// hostID is empty.
+	if hostID != "" {
+		lockTargets = append(lockTargets, types.LockTarget{
+			ServerID: hostID,
+		})
+	}
 	err = srv.StartMonitor(srv.MonitorConfig{
 		LockWatcher:           s.parent.cfg.LockWatcher,
-		LockTargets:           s.LockTargets(),
+		LockTargets:           lockTargets,
 		DisconnectExpiredCert: s.disconnectExpiredCert,
 		ClientIdleTimeout:     s.clientIdleTimeout,
 		Clock:                 s.parent.cfg.Clock,
@@ -2356,12 +2364,16 @@ func (s *clusterSession) getServerMetadata() apievents.ServerMetadata {
 }
 
 func (s *clusterSession) Dial(network, addr string) (net.Conn, error) {
-	return s.monitorConn(s.dial(s.requestContext, network, addr))
+	var hostID string
+	conn, err := s.dial(s.requestContext, network, addr, withHostIDCollection(&hostID))
+	return s.monitorConn(conn, err, hostID)
 }
 
 func (s *clusterSession) DialWithContext(opts ...contextDialerOption) func(ctx context.Context, network, addr string) (net.Conn, error) {
 	return func(ctx context.Context, network, addr string) (net.Conn, error) {
-		return s.monitorConn(s.dial(ctx, network, addr, opts...))
+		var hostID string
+		conn, err := s.dial(ctx, network, addr, append(opts, withHostIDCollection(&hostID))...)
+		return s.monitorConn(conn, err, hostID)
 	}
 }
 
diff --git a/lib/kube/proxy/roundtrip.go b/lib/kube/proxy/roundtrip.go
index b6935c3ce6cfc..3630f3e898dd7 100644
--- a/lib/kube/proxy/roundtrip.go
+++ b/lib/kube/proxy/roundtrip.go
@@ -113,7 +113,6 @@ func (s *SpdyRoundTripper) Dial(req *http.Request) (net.Conn, error) {
 	if err != nil {
 		return nil, err
 	}
-
 	if err := req.Write(conn); err != nil {
 		conn.Close()
 		return nil, err
diff --git a/lib/kube/proxy/transport.go b/lib/kube/proxy/transport.go
index 7ee2eabad282b..1e8e0067ed9e4 100644
--- a/lib/kube/proxy/transport.go
+++ b/lib/kube/proxy/transport.go
@@ -347,6 +347,7 @@ func (f *Forwarder) localClusterDialer(kubeClusterName string, opts ...contextDi
 				ProxyIDs: s.GetProxyIDs(),
 			})
 			if err == nil {
+				opt.collect(s.GetHostID())
 				return conn, nil
 			}
 			errs = append(errs, trace.Wrap(err))
@@ -423,13 +424,21 @@ func (f *Forwarder) getContextDialerFunc(s *clusterSession, opts ...contextDiale
 // contextDialerOptions is a set of options that can be used to filter
 // the hosts that the dialer connects to.
 type contextDialerOptions struct {
-	hostID string
+	hostIDFilter  string
+	collectHostID *string
 }
 
 // matches returns true if the host matches the hostID of the dialer options or
 // if the dialer hostID is empty.
 func (c *contextDialerOptions) matches(hostID string) bool {
-	return c.hostID == "" || c.hostID == hostID
+	return c.hostIDFilter == "" || c.hostIDFilter == hostID
+}
+
+// collect sets the hostID that the dialer connected to if collectHostID is not nil.
+func (c *contextDialerOptions) collect(hostID string) {
+	if c.collectHostID != nil {
+		*c.collectHostID = hostID
+	}
 }
 
 // contextDialerOption is a functional option for the contextDialerOptions.
@@ -442,6 +451,14 @@ type contextDialerOption func(*contextDialerOptions)
 // error.
 func withTargetHostID(hostID string) contextDialerOption {
 	return func(o *contextDialerOptions) {
-		o.hostID = hostID
+		o.hostIDFilter = hostID
+	}
+}
+
+// withHostIDCollection is a functional option that sets the hostID of the dialer
+// to the provided pointer.
+func withHostIDCollection(hostID *string) contextDialerOption {
+	return func(o *contextDialerOptions) {
+		o.collectHostID = hostID
 	}
 }

From 425846af79c144c90112a5f416086b46357f7719 Mon Sep 17 00:00:00 2001
From: Gus Luxton <gus@goteleport.com>
Date: Tue, 29 Oct 2024 13:50:11 -0300
Subject: [PATCH 06/13] ha-autoscale-cluster: Source unit file environment
 variables from /etc/default/teleport (#48040)

---
 assets/aws/files/system/teleport-proxy-acm.service           | 2 --
 assets/aws/files/system/teleport-proxy.service               | 2 --
 .../aws/terraform/ha-autoscale-cluster/proxy-user-data.tpl   | 5 +++++
 3 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/assets/aws/files/system/teleport-proxy-acm.service b/assets/aws/files/system/teleport-proxy-acm.service
index 757c9219e4b0d..ca5e913a081a6 100644
--- a/assets/aws/files/system/teleport-proxy-acm.service
+++ b/assets/aws/files/system/teleport-proxy-acm.service
@@ -11,8 +11,6 @@ Restart=always
 RestartSec=5
 RuntimeDirectory=teleport
 EnvironmentFile=-/etc/default/teleport
-# TODO(gus): REMOVE IN 17.0.0 - /etc/default/teleport should be used instead
-EnvironmentFile=/etc/teleport.d/conf
 ExecStartPre=/usr/local/bin/teleport-ssm-get-token
 ExecStart=/usr/local/bin/teleport start --config=/etc/teleport.yaml --diag-addr=127.0.0.1:3000 --pid-file=/run/teleport/teleport.pid
 # systemd before 239 needs an absolute path
diff --git a/assets/aws/files/system/teleport-proxy.service b/assets/aws/files/system/teleport-proxy.service
index 8fe4ba5985ce6..3c31de24b1178 100644
--- a/assets/aws/files/system/teleport-proxy.service
+++ b/assets/aws/files/system/teleport-proxy.service
@@ -11,8 +11,6 @@ Restart=always
 RestartSec=5
 RuntimeDirectory=teleport
 EnvironmentFile=-/etc/default/teleport
-# TODO(gus): REMOVE IN 17.0.0 - /etc/default/teleport should be used instead
-EnvironmentFile=/etc/teleport.d/conf
 ExecStartPre=/usr/local/bin/teleport-ssm-get-token
 ExecStartPre=/bin/aws s3 sync s3://${TELEPORT_S3_BUCKET}/live/${TELEPORT_DOMAIN_NAME} /var/lib/teleport
 ExecStart=/usr/local/bin/teleport start --config=/etc/teleport.yaml --diag-addr=127.0.0.1:3000 --pid-file=/run/teleport/teleport.pid
diff --git a/examples/aws/terraform/ha-autoscale-cluster/proxy-user-data.tpl b/examples/aws/terraform/ha-autoscale-cluster/proxy-user-data.tpl
index 3b348c8c4a237..97071d35b014d 100644
--- a/examples/aws/terraform/ha-autoscale-cluster/proxy-user-data.tpl
+++ b/examples/aws/terraform/ha-autoscale-cluster/proxy-user-data.tpl
@@ -14,3 +14,8 @@ TELEPORT_ENABLE_POSTGRES=${enable_postgres_listener}
 USE_ACM=${use_acm}
 USE_TLS_ROUTING=${use_tls_routing}
 EOF
+cat >>/etc/default/teleport <<EOF
+EC2_REGION=${region}
+TELEPORT_DOMAIN_NAME=${domain_name}
+TELEPORT_S3_BUCKET=${s3_bucket}
+EOF

From 24e48d61c85a967a63dcc1597e0ba402969cb7ce Mon Sep 17 00:00:00 2001
From: Gus Rivera <gus.rivera@goteleport.com>
Date: Tue, 29 Oct 2024 12:24:55 -0500
Subject: [PATCH 07/13] Cleaning up release-notes tool and using
 shared-workflows (#48084)

* Cleaning up release-notes tool and using shared-workflows

* Updating docs
---
 Makefile                                      |  10 +-
 .../tooling/cmd/release-notes/README.md       |  29 -----
 .../tooling/cmd/release-notes/main.go         |  54 --------
 .../cmd/release-notes/release_notes.go        | 116 ------------------
 .../cmd/release-notes/release_notes_test.go   |  91 --------------
 .../template/release-notes.md.tmpl            |  26 ----
 .../testdata/expected-release-notes.md        |  26 ----
 .../testdata/expected-with-labels.md          |  30 -----
 .../release-notes/testdata/test-changelog.md  |  23 ----
 9 files changed, 4 insertions(+), 401 deletions(-)
 delete mode 100644 build.assets/tooling/cmd/release-notes/README.md
 delete mode 100644 build.assets/tooling/cmd/release-notes/main.go
 delete mode 100644 build.assets/tooling/cmd/release-notes/release_notes.go
 delete mode 100644 build.assets/tooling/cmd/release-notes/release_notes_test.go
 delete mode 100644 build.assets/tooling/cmd/release-notes/template/release-notes.md.tmpl
 delete mode 100644 build.assets/tooling/cmd/release-notes/testdata/expected-release-notes.md
 delete mode 100644 build.assets/tooling/cmd/release-notes/testdata/expected-with-labels.md
 delete mode 100644 build.assets/tooling/cmd/release-notes/testdata/test-changelog.md

diff --git a/Makefile b/Makefile
index a6cf23c23e277..82e210bbad320 100644
--- a/Makefile
+++ b/Makefile
@@ -820,10 +820,6 @@ RERUN := $(TOOLINGDIR)/bin/rerun
 $(RERUN): $(wildcard $(TOOLINGDIR)/cmd/rerun/*.go)
 	cd $(TOOLINGDIR) && go build -o "$@" ./cmd/rerun
 
-RELEASE_NOTES_GEN := $(TOOLINGDIR)/bin/release-notes
-$(RELEASE_NOTES_GEN): $(wildcard $(TOOLINGDIR)/cmd/release-notes/*.go)
-	cd $(TOOLINGDIR) && go build -o "$@" ./cmd/release-notes
-
 .PHONY: tooling
 tooling: ensure-gotestsum $(DIFF_TEST)
 
@@ -1822,11 +1818,13 @@ changelog:
 # does not match version set it will fail to create a release. If tag doesn't exist it
 # will also fail to create a release.
 #
-# For more information on release notes generation see ./build.assets/tooling/cmd/release-notes
+# For more information on release notes generation see: 
+#   https://github.com/gravitational/shared-workflows/tree/gus/release-notes/tools/release-notes#readme
+RELEASE_NOTES_GEN = github.com/gravitational/shared-workflows/tools/release-notes@latest
 .PHONY: create-github-release
 create-github-release: LATEST = false
 create-github-release: GITHUB_RELEASE_LABELS = ""
-create-github-release: $(RELEASE_NOTES_GEN)
+create-github-release:
 	@NOTES=$$($(RELEASE_NOTES_GEN) --labels=$(GITHUB_RELEASE_LABELS) $(VERSION) CHANGELOG.md) && gh release create v$(VERSION) \
 	-t "Teleport $(VERSION)" \
 	--latest=$(LATEST) \
diff --git a/build.assets/tooling/cmd/release-notes/README.md b/build.assets/tooling/cmd/release-notes/README.md
deleted file mode 100644
index 1a8c8e41f09f4..0000000000000
--- a/build.assets/tooling/cmd/release-notes/README.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# release-notes
-
-A release notes generator for Teleport releases.
-
-## Usage
-
-```shell
-usage: release-notes <version> <changelog>
-
-
-Flags:
-  --[no-]help  Show context-sensitive help (also try --help-long and --help-man).
-
-Args:
-  <version>    Version to be released
-  <changelog>  Path to CHANGELOG.md
-```
-
-This script is expected to be run along side the `gh` CLI to create a release.
-
-```shell
-release-notes $VERSION CHANGELOG.md | gh release create \
-    -t "Teleport $VERSION" \
-    --latest=false \
-    --target=$BRANCH \
-    --verify-tag \
-    -F - \
-
-```
\ No newline at end of file
diff --git a/build.assets/tooling/cmd/release-notes/main.go b/build.assets/tooling/cmd/release-notes/main.go
deleted file mode 100644
index 8ec06e4c43c93..0000000000000
--- a/build.assets/tooling/cmd/release-notes/main.go
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Teleport
- * Copyright (C) 2024  Gravitational, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-package main
-
-import (
-	"fmt"
-	"log"
-	"os"
-
-	"github.com/alecthomas/kingpin/v2"
-)
-
-var (
-	version   = kingpin.Arg("version", "Version to be released").Required().String()
-	changelog = kingpin.Arg("changelog", "Path to CHANGELOG.md").Required().String()
-	labels    = kingpin.Flag("labels", "Labels to apply to the end of a release, e.g. security labels").String()
-)
-
-func main() {
-	kingpin.Parse()
-
-	clFile, err := os.Open(*changelog)
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer clFile.Close()
-
-	gen := &releaseNotesGenerator{
-		releaseVersion: *version,
-		labels:         *labels,
-	}
-
-	notes, err := gen.generateReleaseNotes(clFile)
-	if err != nil {
-		log.Fatal(err)
-	}
-	fmt.Println(notes)
-}
diff --git a/build.assets/tooling/cmd/release-notes/release_notes.go b/build.assets/tooling/cmd/release-notes/release_notes.go
deleted file mode 100644
index 6795efc841dbb..0000000000000
--- a/build.assets/tooling/cmd/release-notes/release_notes.go
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Teleport
- * Copyright (C) 2024  Gravitational, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-package main
-
-import (
-	"bufio"
-	"bytes"
-	_ "embed"
-	"fmt"
-	"html/template"
-	"io"
-	"strings"
-
-	"github.com/gravitational/trace"
-)
-
-//go:embed template/release-notes.md.tmpl
-var tmpl string
-
-type tmplInfo struct {
-	Version     string
-	Description string
-	Labels      string
-}
-
-var (
-	releaseNotesTemplate = template.Must(template.New("release notes").Parse(tmpl))
-)
-
-type releaseNotesGenerator struct {
-	// releaseVersion is the version for the release.
-	// This will be compared against the version present in the changelog.
-	releaseVersion string
-	// labels is a string applied to the end of the release description
-	// that will be picked up by other automation.
-	//
-	// It won't be validated but it is expected to be a comma separated list of
-	// entries in the format
-	// 	label=key
-	labels string
-}
-
-func (r *releaseNotesGenerator) generateReleaseNotes(md io.Reader) (string, error) {
-	desc, err := r.parseMD(md)
-	if err != nil {
-		return "", err
-	}
-
-	info := tmplInfo{
-		Version:     r.releaseVersion,
-		Description: desc,
-		Labels:      r.labels,
-	}
-	var buff bytes.Buffer
-	if err := releaseNotesTemplate.Execute(&buff, info); err != nil {
-		return "", trace.Wrap(err)
-	}
-	return buff.String(), nil
-}
-
-// parseMD is a simple implementation of a parser to extract the description from a changelog.
-// Will scan for the first double header and pull the version from that.
-// Will pull all information between the first and second double header for the description.
-func (r *releaseNotesGenerator) parseMD(md io.Reader) (string, error) {
-	sc := bufio.NewScanner(md)
-
-	// Extract the first second-level heading
-	var heading string
-	for sc.Scan() {
-		if strings.HasPrefix(sc.Text(), "## ") {
-			heading = strings.TrimSpace(strings.TrimPrefix(sc.Text(), "## "))
-			break
-		}
-	}
-	if err := sc.Err(); err != nil {
-		return "", trace.Wrap(err)
-	}
-	if heading == "" {
-		return "", trace.BadParameter("no second-level heading found in changelog")
-	}
-
-	// Expected heading would be something like "16.0.4 (MM/DD/YY)"
-	parts := strings.SplitN(heading, " ", 2)
-	if parts[0] != r.releaseVersion {
-		return "", trace.BadParameter("changelog version number did not match expected version number: %q != %q", parts[0], r.releaseVersion)
-	}
-
-	// Write everything until next header to buffer
-	var buff bytes.Buffer
-	for sc.Scan() && !strings.HasPrefix(sc.Text(), "## ") {
-		if _, err := fmt.Fprintln(&buff, sc.Text()); err != nil {
-			return "", trace.Wrap(err)
-		}
-	}
-	if err := sc.Err(); err != nil {
-		return "", trace.Wrap(err)
-	}
-
-	return strings.TrimSpace(buff.String()), nil
-}
diff --git a/build.assets/tooling/cmd/release-notes/release_notes_test.go b/build.assets/tooling/cmd/release-notes/release_notes_test.go
deleted file mode 100644
index 67af99d28ce9c..0000000000000
--- a/build.assets/tooling/cmd/release-notes/release_notes_test.go
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Teleport
- * Copyright (C) 2024  Gravitational, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-package main
-
-import (
-	_ "embed"
-	"os"
-	"path/filepath"
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-)
-
-func Test_generateReleaseNotes(t *testing.T) {
-	tests := []struct {
-		name           string
-		releaseVersion string
-		labels         string
-		clFile         *os.File
-		want           string
-		wantErr        bool
-	}{
-		{
-			name:           "happy path",
-			releaseVersion: "16.0.1",
-			clFile:         mustOpen(t, "test-changelog.md"),
-			want:           mustRead(t, "expected-release-notes.md"),
-			wantErr:        false,
-		},
-		{
-			name:           "with labels",
-			releaseVersion: "16.0.1",
-			labels:         "security-patch=yes, security-patch-alts=v16.0.0,v16.0.1",
-			clFile:         mustOpen(t, "test-changelog.md"),
-			want:           mustRead(t, "expected-with-labels.md"),
-			wantErr:        false,
-		},
-		{
-			name:           "version mismatch",
-			releaseVersion: "15.0.1", // test-changelog has 16.0.1
-			clFile:         mustOpen(t, "test-changelog.md"),
-			want:           "",
-			wantErr:        true,
-		},
-	}
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			r := &releaseNotesGenerator{
-				releaseVersion: tt.releaseVersion,
-				labels:         tt.labels,
-			}
-
-			got, err := r.generateReleaseNotes(tt.clFile)
-			if tt.wantErr {
-				assert.Error(t, err)
-				return
-			}
-			assert.NoError(t, err)
-			assert.Equal(t, tt.want, got)
-		})
-	}
-}
-
-func mustOpen(t *testing.T, filename string) *os.File {
-	testfile, err := os.Open(filepath.Join("testdata", filename))
-	require.NoError(t, err)
-	return testfile
-}
-
-func mustRead(t *testing.T, filename string) string {
-	expectedReleaseNotes, err := os.ReadFile(filepath.Join("testdata", filename))
-	require.NoError(t, err)
-	return string(expectedReleaseNotes)
-}
diff --git a/build.assets/tooling/cmd/release-notes/template/release-notes.md.tmpl b/build.assets/tooling/cmd/release-notes/template/release-notes.md.tmpl
deleted file mode 100644
index a4825e3ac7d40..0000000000000
--- a/build.assets/tooling/cmd/release-notes/template/release-notes.md.tmpl
+++ /dev/null
@@ -1,26 +0,0 @@
-## Description
-
-{{ .Description }}
-
-## Download
-
-Download the current and previous releases of Teleport at https://goteleport.com/download.
-
-## Plugins
-
-Download the current release of Teleport plugins from the links below.
-* Slack [Linux amd64](https://cdn.teleport.dev/teleport-access-slack-v{{ .Version }}-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-slack-v{{ .Version }}-linux-arm64-bin.tar.gz) 
-* Mattermost [Linux amd64](https://cdn.teleport.dev/teleport-access-mattermost-v{{ .Version }}-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-mattermost-v{{ .Version }}-linux-arm64-bin.tar.gz)
-* Discord [Linux amd64](https://cdn.teleport.dev/teleport-access-discord-v{{ .Version }}-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-discord-v{{ .Version }}-linux-arm64-bin.tar.gz)
-* Terraform Provider [Linux amd64](https://cdn.teleport.dev/terraform-provider-teleport-v{{ .Version }}-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/terraform-provider-teleport-v{{ .Version }}-linux-arm64-bin.tar.gz) | [macOS amd64](https://cdn.teleport.dev/terraform-provider-teleport-v{{ .Version }}-darwin-amd64-bin.tar.gz) | [macOS arm64](https://cdn.teleport.dev/terraform-provider-teleport-v{{ .Version }}-darwin-arm64-bin.tar.gz) | [macOS universal](https://cdn.teleport.dev/terraform-provider-teleport-v{{ .Version }}-darwin-universal-bin.tar.gz)
-* Event Handler [Linux amd64](https://cdn.teleport.dev/teleport-event-handler-v{{ .Version }}-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-event-handler-v{{ .Version }}-linux-arm64-bin.tar.gz) | [macOS amd64](https://cdn.teleport.dev/teleport-event-handler-v{{ .Version }}-darwin-amd64-bin.tar.gz)
-* PagerDuty [Linux amd64](https://cdn.teleport.dev/teleport-access-pagerduty-v{{ .Version }}-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-pagerduty-v{{ .Version }}-linux-arm64-bin.tar.gz)
-* Jira [Linux amd64](https://cdn.teleport.dev/teleport-access-jira-v{{ .Version }}-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-jira-v{{ .Version }}-linux-arm64-bin.tar.gz)
-* Email [Linux amd64](https://cdn.teleport.dev/teleport-access-email-v{{ .Version }}-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-email-v{{ .Version }}-linux-arm64-bin.tar.gz)
-* Microsoft Teams [Linux amd64](https://cdn.teleport.dev/teleport-access-msteams-v{{ .Version }}-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-msteams-v{{ .Version }}-linux-arm64-bin.tar.gz)
-{{- if .Labels }}
-
----
-
-labels: {{ .Labels }}
-{{- end }}
diff --git a/build.assets/tooling/cmd/release-notes/testdata/expected-release-notes.md b/build.assets/tooling/cmd/release-notes/testdata/expected-release-notes.md
deleted file mode 100644
index a8e835ad84ad5..0000000000000
--- a/build.assets/tooling/cmd/release-notes/testdata/expected-release-notes.md
+++ /dev/null
@@ -1,26 +0,0 @@
-## Description
-
-* `tctl` now ignores any configuration file if the auth_service section is disabled, and prefer loading credentials from a given identity file or tsh profile instead. [#43115](https://github.com/gravitational/teleport/pull/43115)
-* Skip `jamf_service` validation when the service is not enabled. [#43095](https://github.com/gravitational/teleport/pull/43095)
-* Fix v16.0.0 amd64 Teleport plugin images using arm64 binaries. [#43084](https://github.com/gravitational/teleport/pull/43084)
-* Add ability to edit user traits from the Web UI. [#43067](https://github.com/gravitational/teleport/pull/43067)
-* Enforce limits when reading events from Firestore for large time windows to prevent OOM events. [#42966](https://github.com/gravitational/teleport/pull/42966)
-* Allow all authenticated users to read the cluster `vnet_config`. [#42957](https://github.com/gravitational/teleport/pull/42957)
-* Improve search and predicate/label based dialing performance in large clusters under very high load. [#42943](https://github.com/gravitational/teleport/pull/42943)
-
-## Download
-
-Download the current and previous releases of Teleport at https://goteleport.com/download.
-
-## Plugins
-
-Download the current release of Teleport plugins from the links below.
-* Slack [Linux amd64](https://cdn.teleport.dev/teleport-access-slack-v16.0.1-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-slack-v16.0.1-linux-arm64-bin.tar.gz) 
-* Mattermost [Linux amd64](https://cdn.teleport.dev/teleport-access-mattermost-v16.0.1-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-mattermost-v16.0.1-linux-arm64-bin.tar.gz)
-* Discord [Linux amd64](https://cdn.teleport.dev/teleport-access-discord-v16.0.1-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-discord-v16.0.1-linux-arm64-bin.tar.gz)
-* Terraform Provider [Linux amd64](https://cdn.teleport.dev/terraform-provider-teleport-v16.0.1-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/terraform-provider-teleport-v16.0.1-linux-arm64-bin.tar.gz) | [macOS amd64](https://cdn.teleport.dev/terraform-provider-teleport-v16.0.1-darwin-amd64-bin.tar.gz) | [macOS arm64](https://cdn.teleport.dev/terraform-provider-teleport-v16.0.1-darwin-arm64-bin.tar.gz) | [macOS universal](https://cdn.teleport.dev/terraform-provider-teleport-v16.0.1-darwin-universal-bin.tar.gz)
-* Event Handler [Linux amd64](https://cdn.teleport.dev/teleport-event-handler-v16.0.1-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-event-handler-v16.0.1-linux-arm64-bin.tar.gz) | [macOS amd64](https://cdn.teleport.dev/teleport-event-handler-v16.0.1-darwin-amd64-bin.tar.gz)
-* PagerDuty [Linux amd64](https://cdn.teleport.dev/teleport-access-pagerduty-v16.0.1-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-pagerduty-v16.0.1-linux-arm64-bin.tar.gz)
-* Jira [Linux amd64](https://cdn.teleport.dev/teleport-access-jira-v16.0.1-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-jira-v16.0.1-linux-arm64-bin.tar.gz)
-* Email [Linux amd64](https://cdn.teleport.dev/teleport-access-email-v16.0.1-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-email-v16.0.1-linux-arm64-bin.tar.gz)
-* Microsoft Teams [Linux amd64](https://cdn.teleport.dev/teleport-access-msteams-v16.0.1-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-msteams-v16.0.1-linux-arm64-bin.tar.gz)
diff --git a/build.assets/tooling/cmd/release-notes/testdata/expected-with-labels.md b/build.assets/tooling/cmd/release-notes/testdata/expected-with-labels.md
deleted file mode 100644
index 4a91b668129d2..0000000000000
--- a/build.assets/tooling/cmd/release-notes/testdata/expected-with-labels.md
+++ /dev/null
@@ -1,30 +0,0 @@
-## Description
-
-* `tctl` now ignores any configuration file if the auth_service section is disabled, and prefer loading credentials from a given identity file or tsh profile instead. [#43115](https://github.com/gravitational/teleport/pull/43115)
-* Skip `jamf_service` validation when the service is not enabled. [#43095](https://github.com/gravitational/teleport/pull/43095)
-* Fix v16.0.0 amd64 Teleport plugin images using arm64 binaries. [#43084](https://github.com/gravitational/teleport/pull/43084)
-* Add ability to edit user traits from the Web UI. [#43067](https://github.com/gravitational/teleport/pull/43067)
-* Enforce limits when reading events from Firestore for large time windows to prevent OOM events. [#42966](https://github.com/gravitational/teleport/pull/42966)
-* Allow all authenticated users to read the cluster `vnet_config`. [#42957](https://github.com/gravitational/teleport/pull/42957)
-* Improve search and predicate/label based dialing performance in large clusters under very high load. [#42943](https://github.com/gravitational/teleport/pull/42943)
-
-## Download
-
-Download the current and previous releases of Teleport at https://goteleport.com/download.
-
-## Plugins
-
-Download the current release of Teleport plugins from the links below.
-* Slack [Linux amd64](https://cdn.teleport.dev/teleport-access-slack-v16.0.1-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-slack-v16.0.1-linux-arm64-bin.tar.gz) 
-* Mattermost [Linux amd64](https://cdn.teleport.dev/teleport-access-mattermost-v16.0.1-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-mattermost-v16.0.1-linux-arm64-bin.tar.gz)
-* Discord [Linux amd64](https://cdn.teleport.dev/teleport-access-discord-v16.0.1-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-discord-v16.0.1-linux-arm64-bin.tar.gz)
-* Terraform Provider [Linux amd64](https://cdn.teleport.dev/terraform-provider-teleport-v16.0.1-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/terraform-provider-teleport-v16.0.1-linux-arm64-bin.tar.gz) | [macOS amd64](https://cdn.teleport.dev/terraform-provider-teleport-v16.0.1-darwin-amd64-bin.tar.gz) | [macOS arm64](https://cdn.teleport.dev/terraform-provider-teleport-v16.0.1-darwin-arm64-bin.tar.gz) | [macOS universal](https://cdn.teleport.dev/terraform-provider-teleport-v16.0.1-darwin-universal-bin.tar.gz)
-* Event Handler [Linux amd64](https://cdn.teleport.dev/teleport-event-handler-v16.0.1-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-event-handler-v16.0.1-linux-arm64-bin.tar.gz) | [macOS amd64](https://cdn.teleport.dev/teleport-event-handler-v16.0.1-darwin-amd64-bin.tar.gz)
-* PagerDuty [Linux amd64](https://cdn.teleport.dev/teleport-access-pagerduty-v16.0.1-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-pagerduty-v16.0.1-linux-arm64-bin.tar.gz)
-* Jira [Linux amd64](https://cdn.teleport.dev/teleport-access-jira-v16.0.1-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-jira-v16.0.1-linux-arm64-bin.tar.gz)
-* Email [Linux amd64](https://cdn.teleport.dev/teleport-access-email-v16.0.1-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-email-v16.0.1-linux-arm64-bin.tar.gz)
-* Microsoft Teams [Linux amd64](https://cdn.teleport.dev/teleport-access-msteams-v16.0.1-linux-amd64-bin.tar.gz) | [Linux arm64](https://cdn.teleport.dev/teleport-access-msteams-v16.0.1-linux-arm64-bin.tar.gz)
-
----
-
-labels: security-patch=yes, security-patch-alts=v16.0.0,v16.0.1
diff --git a/build.assets/tooling/cmd/release-notes/testdata/test-changelog.md b/build.assets/tooling/cmd/release-notes/testdata/test-changelog.md
deleted file mode 100644
index 912a9a1060100..0000000000000
--- a/build.assets/tooling/cmd/release-notes/testdata/test-changelog.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# Changelog
-
-## 16.0.1 (06/17/24)
-
-* `tctl` now ignores any configuration file if the auth_service section is disabled, and prefer loading credentials from a given identity file or tsh profile instead. [#43115](https://github.com/gravitational/teleport/pull/43115)
-* Skip `jamf_service` validation when the service is not enabled. [#43095](https://github.com/gravitational/teleport/pull/43095)
-* Fix v16.0.0 amd64 Teleport plugin images using arm64 binaries. [#43084](https://github.com/gravitational/teleport/pull/43084)
-* Add ability to edit user traits from the Web UI. [#43067](https://github.com/gravitational/teleport/pull/43067)
-* Enforce limits when reading events from Firestore for large time windows to prevent OOM events. [#42966](https://github.com/gravitational/teleport/pull/42966)
-* Allow all authenticated users to read the cluster `vnet_config`. [#42957](https://github.com/gravitational/teleport/pull/42957)
-* Improve search and predicate/label based dialing performance in large clusters under very high load. [#42943](https://github.com/gravitational/teleport/pull/42943)
-
-## 16.0.0 (06/13/24)
-
-Teleport 16 brings the following new features and improvements:
-
-- Teleport VNet
-- Device Trust for the Web UI
-- Increased support for per-session MFA
-- Web UI notification system
-- Access requests from the resources view
-- `tctl` for Windows
-- Teleport plugins improvements

From a54c311d874b0728dabb4d6986ec89de87a31f69 Mon Sep 17 00:00:00 2001
From: Forrest <30576607+fspmarshall@users.noreply.github.com>
Date: Tue, 29 Oct 2024 10:28:01 -0700
Subject: [PATCH 08/13] switch trusted/remote cluster management to atomic
 write (#48009)

---
 api/types/trustedcluster.go      |   8 +
 lib/auth/trustedcluster.go       | 294 +++++++----------
 lib/auth/trustedcluster_test.go  |  17 +-
 lib/services/local/trust.go      | 529 ++++++++++++++++++++++++++-----
 lib/services/local/trust_test.go | 330 ++++++++++++++++++-
 lib/services/trust.go            |  20 ++
 6 files changed, 914 insertions(+), 284 deletions(-)

diff --git a/api/types/trustedcluster.go b/api/types/trustedcluster.go
index 7e233c864c826..27d8129f70cfe 100644
--- a/api/types/trustedcluster.go
+++ b/api/types/trustedcluster.go
@@ -22,6 +22,8 @@ import (
 	"time"
 
 	"github.com/gravitational/trace"
+
+	"github.com/gravitational/teleport/api/utils"
 )
 
 // TrustedCluster holds information needed for a cluster that can not be directly
@@ -60,6 +62,8 @@ type TrustedCluster interface {
 	SetReverseTunnelAddress(string)
 	// CanChangeStateTo checks the TrustedCluster can transform into another.
 	CanChangeStateTo(TrustedCluster) error
+	// Clone returns a deep copy of the TrustedCluster.
+	Clone() TrustedCluster
 }
 
 // NewTrustedCluster is a convenience way to create a TrustedCluster resource.
@@ -259,6 +263,10 @@ func (c *TrustedClusterV2) CanChangeStateTo(t TrustedCluster) error {
 	return nil
 }
 
+func (c *TrustedClusterV2) Clone() TrustedCluster {
+	return utils.CloneProtoMsg(c)
+}
+
 // String represents a human readable version of trusted cluster settings.
 func (c *TrustedClusterV2) String() string {
 	return fmt.Sprintf("TrustedCluster(Enabled=%v,Roles=%v,Token=%v,ProxyAddress=%v,ReverseTunnelAddress=%v)",
diff --git a/lib/auth/trustedcluster.go b/lib/auth/trustedcluster.go
index bd68f9d10832e..acbc46dc4f281 100644
--- a/lib/auth/trustedcluster.go
+++ b/lib/auth/trustedcluster.go
@@ -45,129 +45,115 @@ import (
 )
 
 // UpsertTrustedCluster creates or toggles a Trusted Cluster relationship.
-func (a *Server) UpsertTrustedCluster(ctx context.Context, trustedCluster types.TrustedCluster) (newTrustedCluster types.TrustedCluster, returnErr error) {
+func (a *Server) UpsertTrustedCluster(ctx context.Context, tc types.TrustedCluster) (newTrustedCluster types.TrustedCluster, returnErr error) {
+	// verify that trusted cluster role map does not reference non-existent roles
+	if err := a.checkLocalRoles(ctx, tc.GetRoleMap()); err != nil {
+		return nil, trace.Wrap(err)
+	}
+
 	// It is recommended to omit trusted cluster name because the trusted cluster name
 	// is updated to the roots cluster name during the handshake with the root cluster.
 	var existingCluster types.TrustedCluster
-	if trustedCluster.GetName() != "" {
+	var cas []types.CertAuthority
+	if tc.GetName() != "" {
 		var err error
-		existingCluster, err = a.GetTrustedCluster(ctx, trustedCluster.GetName())
+		existingCluster, err = a.GetTrustedCluster(ctx, tc.GetName())
 		if err != nil && !trace.IsNotFound(err) {
 			return nil, trace.Wrap(err)
 		}
 	}
 
-	enable := trustedCluster.GetEnabled()
-
-	// If the trusted cluster already exists in the backend, make sure it's a
-	// valid state change client is trying to make.
-	if existingCluster != nil {
-		if err := existingCluster.CanChangeStateTo(trustedCluster); err != nil {
-			return nil, trace.Wrap(err)
-		}
+	// if there is no existing cluster, switch to the create case
+	if existingCluster == nil {
+		return a.createTrustedCluster(ctx, tc)
 	}
 
-	logger := log.WithField("trusted_cluster", trustedCluster.GetName())
+	if err := existingCluster.CanChangeStateTo(tc); err != nil {
+		return nil, trace.Wrap(err)
+	}
 
-	// change state
-	if err := a.checkLocalRoles(ctx, trustedCluster.GetRoleMap()); err != nil {
+	// always load all current CAs. even if we aren't changing them as part of
+	// this function, Services.UpdateTrustedCluster will only correctly activate/deactivate
+	// CAs that are explicitly passed to it. note that we pass in the existing cluster state
+	// since where CAs are stored depends on the current state of the trusted cluster.
+	cas, err := a.getCAsForTrustedCluster(ctx, existingCluster)
+	if err != nil {
 		return nil, trace.Wrap(err)
 	}
 
-	// Update role map
-	if existingCluster != nil && !existingCluster.GetRoleMap().IsEqual(trustedCluster.GetRoleMap()) {
-		if err := a.UpdateUserCARoleMap(ctx, existingCluster.GetName(), trustedCluster.GetRoleMap(),
-			existingCluster.GetEnabled()); err != nil {
-			return nil, trace.Wrap(err)
-		}
+	// propagate any role map changes to cas
+	configureCAsForTrustedCluster(tc, cas)
 
-		// Reset previous UserCA role map if this func fails later on
-		defer func() {
-			if returnErr != nil {
-				if err := a.UpdateUserCARoleMap(ctx, trustedCluster.GetName(), existingCluster.GetRoleMap(),
-					trustedCluster.GetEnabled()); err != nil {
-					returnErr = trace.NewAggregate(err, returnErr)
-				}
-			}
-		}()
-	}
-	// Create or update state
-	switch {
-	case existingCluster != nil && enable == true:
-		if existingCluster.GetEnabled() {
-			break
-		}
-		log.Debugf("Enabling existing Trusted Cluster relationship.")
+	// state transition is valid, set the expected revision
+	tc.SetRevision(existingCluster.GetRevision())
 
-		if err := a.activateCertAuthority(ctx, trustedCluster); err != nil {
-			if trace.IsNotFound(err) {
-				return nil, trace.BadParameter("enable only supported for Trusted Clusters created with Teleport 2.3 and above")
-			}
-			return nil, trace.Wrap(err)
-		}
+	revision, err := a.Services.UpdateTrustedCluster(ctx, tc, cas)
+	if err != nil {
+		return nil, trace.Wrap(err)
+	}
 
-		if err := a.createReverseTunnel(ctx, trustedCluster); err != nil {
-			return nil, trace.Wrap(err)
-		}
-	case existingCluster != nil && enable == false:
-		if !existingCluster.GetEnabled() {
-			break
-		}
-		log.Debugf("Disabling existing Trusted Cluster relationship.")
+	tc.SetRevision(revision)
 
-		if err := a.deactivateCertAuthority(ctx, trustedCluster); err != nil {
-			if trace.IsNotFound(err) {
-				return nil, trace.BadParameter("enable only supported for Trusted Clusters created with Teleport 2.3 and above")
-			}
-			return nil, trace.Wrap(err)
-		}
+	if err := a.onTrustedClusterWrite(ctx, tc); err != nil {
+		return nil, trace.Wrap(err)
+	}
 
-		if err := a.DeleteReverseTunnel(ctx, trustedCluster.GetName()); err != nil {
-			return nil, trace.Wrap(err)
-		}
-	case existingCluster == nil && enable == true:
-		logger.Info("Creating enabled Trusted Cluster relationship.")
+	return tc, nil
+}
 
-		remoteCAs, err := a.establishTrust(ctx, trustedCluster)
-		if err != nil {
-			return nil, trace.Wrap(err)
-		}
+func (a *Server) createTrustedCluster(ctx context.Context, tc types.TrustedCluster) (types.TrustedCluster, error) {
+	remoteCAs, err := a.establishTrust(ctx, tc)
+	if err != nil {
+		return nil, trace.Wrap(err)
+	}
 
-		// Force name of the trusted cluster resource
-		// to be equal to the name of the remote cluster it is connecting to.
-		trustedCluster.SetName(remoteCAs[0].GetClusterName())
+	// Force name to the name of the trusted cluster.
+	tc.SetName(remoteCAs[0].GetClusterName())
 
-		if err := a.addCertAuthorities(ctx, trustedCluster, remoteCAs); err != nil {
-			return nil, trace.Wrap(err)
-		}
+	// perform some configuration on the remote CAs
+	configureCAsForTrustedCluster(tc, remoteCAs)
 
-		if err := a.createReverseTunnel(ctx, trustedCluster); err != nil {
-			return nil, trace.Wrap(err)
-		}
+	// atomically create trusted cluster and cert authorities
+	revision, err := a.Services.CreateTrustedCluster(ctx, tc, remoteCAs)
+	if err != nil {
+		return nil, trace.Wrap(err)
+	}
 
-	case existingCluster == nil && enable == false:
-		logger.Info("Creating disabled Trusted Cluster relationship.")
+	tc.SetRevision(revision)
 
-		remoteCAs, err := a.establishTrust(ctx, trustedCluster)
-		if err != nil {
-			return nil, trace.Wrap(err)
-		}
+	if err := a.onTrustedClusterWrite(ctx, tc); err != nil {
+		return nil, trace.Wrap(err)
+	}
 
-		// Force name to the name of the trusted cluster.
-		trustedCluster.SetName(remoteCAs[0].GetClusterName())
+	return tc, nil
+}
 
-		if err := a.addCertAuthorities(ctx, trustedCluster, remoteCAs); err != nil {
-			return nil, trace.Wrap(err)
-		}
+// configureCAsForTrustedCluster modifies remote CAs for use as trusted cluster CAs.
+func configureCAsForTrustedCluster(tc types.TrustedCluster, cas []types.CertAuthority) {
+	// modify the remote CAs for use as tc cas.
+	for _, ca := range cas {
+		// change the name of the remote ca to the name of the trusted cluster.
+		ca.SetName(tc.GetName())
 
-		if err := a.deactivateCertAuthority(ctx, trustedCluster); err != nil {
-			return nil, trace.Wrap(err)
+		// wipe out roles sent from the remote cluster and set roles from the trusted cluster
+		ca.SetRoles(nil)
+		if ca.GetType() == types.UserCA {
+			for _, r := range tc.GetRoles() {
+				ca.AddRole(r)
+			}
+			ca.SetRoleMap(tc.GetRoleMap())
 		}
 	}
+}
 
-	tc, err := a.Services.UpsertTrustedCluster(ctx, trustedCluster)
-	if err != nil {
-		return nil, trace.Wrap(err)
+func (a *Server) onTrustedClusterWrite(ctx context.Context, tc types.TrustedCluster) error {
+	var cerr error
+	if tc.GetEnabled() {
+		cerr = a.createReverseTunnel(ctx, tc)
+	} else {
+		if err := a.DeleteReverseTunnel(ctx, tc.GetName()); err != nil && !trace.IsNotFound(err) {
+			cerr = err
+		}
 	}
 
 	if err := a.emitter.EmitAuditEvent(ctx, &apievents.TrustedClusterCreate{
@@ -177,14 +163,14 @@ func (a *Server) UpsertTrustedCluster(ctx context.Context, trustedCluster types.
 		},
 		UserMetadata: authz.ClientUserMetadata(ctx),
 		ResourceMetadata: apievents.ResourceMetadata{
-			Name: trustedCluster.GetName(),
+			Name: tc.GetName(),
 		},
 		ConnectionMetadata: authz.ConnectionMetadata(ctx),
 	}); err != nil {
-		logger.WithError(err).Warn("Failed to emit trusted cluster create event.")
+		a.logger.WarnContext(ctx, "failed to emit trusted cluster create event", "error", err)
 	}
 
-	return tc, nil
+	return trace.Wrap(cerr)
 }
 
 func (a *Server) checkLocalRoles(ctx context.Context, roleMap types.RoleMap) error {
@@ -207,6 +193,29 @@ func (a *Server) checkLocalRoles(ctx context.Context, roleMap types.RoleMap) err
 	return nil
 }
 
+func (a *Server) getCAsForTrustedCluster(ctx context.Context, tc types.TrustedCluster) ([]types.CertAuthority, error) {
+	var cas []types.CertAuthority
+	// not all CA types are present for trusted clusters, but there isn't a meaningful downside to
+	// just grabbing everything.
+	for _, caType := range types.CertAuthTypes {
+		var ca types.CertAuthority
+		var err error
+		if tc.GetEnabled() {
+			ca, err = a.GetCertAuthority(ctx, types.CertAuthID{Type: caType, DomainName: tc.GetName()}, false)
+		} else {
+			ca, err = a.GetInactiveCertAuthority(ctx, types.CertAuthID{Type: caType, DomainName: tc.GetName()}, false)
+		}
+		if err != nil {
+			if trace.IsNotFound(err) {
+				continue
+			}
+			return nil, trace.Wrap(err)
+		}
+		cas = append(cas, ca)
+	}
+	return cas, nil
+}
+
 // DeleteTrustedCluster removes types.CertAuthority, services.ReverseTunnel,
 // and services.TrustedCluster resources.
 func (a *Server) DeleteTrustedCluster(ctx context.Context, name string) error {
@@ -229,7 +238,7 @@ func (a *Server) DeleteTrustedCluster(ctx context.Context, name string) error {
 		})
 	}
 
-	if err := a.DeleteCertAuthorities(ctx, ids...); err != nil {
+	if err := a.Services.DeleteTrustedClusterInternal(ctx, name, ids); err != nil {
 		return trace.Wrap(err)
 	}
 
@@ -239,10 +248,6 @@ func (a *Server) DeleteTrustedCluster(ctx context.Context, name string) error {
 		}
 	}
 
-	if err := a.Services.DeleteTrustedCluster(ctx, name); err != nil {
-		return trace.Wrap(err)
-	}
-
 	if err := a.emitter.EmitAuditEvent(ctx, &apievents.TrustedClusterDelete{
 		Metadata: apievents.Metadata{
 			Type: events.TrustedClusterDeleteEvent,
@@ -324,54 +329,30 @@ func (a *Server) establishTrust(ctx context.Context, trustedCluster types.Truste
 	return validateResponse.CAs, nil
 }
 
-func (a *Server) addCertAuthorities(ctx context.Context, trustedCluster types.TrustedCluster, remoteCAs []types.CertAuthority) error {
-	// the remote auth server has verified our token. add the
-	// remote certificate authority to our backend
-	for _, remoteCertAuthority := range remoteCAs {
-		// change the name of the remote ca to the name of the trusted cluster
-		remoteCertAuthority.SetName(trustedCluster.GetName())
-
-		// wipe out roles sent from the remote cluster and set roles from the trusted cluster
-		remoteCertAuthority.SetRoles(nil)
-		if remoteCertAuthority.GetType() == types.UserCA {
-			for _, r := range trustedCluster.GetRoles() {
-				remoteCertAuthority.AddRole(r)
-			}
-			remoteCertAuthority.SetRoleMap(trustedCluster.GetRoleMap())
-		}
-	}
-
-	// we use create here instead of upsert to prevent people from wiping out
-	// their own ca if it has the same name as the remote ca
-	_, err := a.CreateCertAuthorities(ctx, remoteCAs...)
-	return trace.Wrap(err)
-}
-
 // DeleteRemoteCluster deletes remote cluster resource, all certificate authorities
 // associated with it
-func (a *Server) DeleteRemoteCluster(ctx context.Context, clusterName string) error {
-	// To make sure remote cluster exists - to protect against random
-	// clusterName requests (e.g. when clusterName is set to local cluster name)
-	if _, err := a.GetRemoteCluster(ctx, clusterName); err != nil {
+func (a *Server) DeleteRemoteCluster(ctx context.Context, name string) error {
+	cn, err := a.GetClusterName()
+	if err != nil {
 		return trace.Wrap(err)
 	}
 
+	// This check ensures users are not deleting their root/own cluster.
+	if cn.GetClusterName() == name {
+		return trace.BadParameter("remote cluster %q is the name of this root cluster and cannot be removed.", name)
+	}
+
 	// we only expect host CAs to be present for remote clusters, but it doesn't hurt
 	// to err on the side of paranoia and delete all CA types.
 	var ids []types.CertAuthID
 	for _, caType := range types.CertAuthTypes {
 		ids = append(ids, types.CertAuthID{
 			Type:       caType,
-			DomainName: clusterName,
+			DomainName: name,
 		})
 	}
 
-	// delete cert authorities associated with the cluster
-	if err := a.DeleteCertAuthorities(ctx, ids...); err != nil {
-		return trace.Wrap(err)
-	}
-
-	return trace.Wrap(a.Services.DeleteRemoteCluster(ctx, clusterName))
+	return trace.Wrap(a.Services.DeleteRemoteClusterInternal(ctx, name, ids))
 }
 
 // GetRemoteCluster returns remote cluster by name
@@ -497,12 +478,6 @@ func (a *Server) validateTrustedCluster(ctx context.Context, validateRequest *au
 	if remoteClusterName == domainName {
 		return nil, trace.AccessDenied("remote cluster has same name as this cluster: %v", domainName)
 	}
-	_, err = a.GetTrustedCluster(ctx, remoteClusterName)
-	if err == nil {
-		return nil, trace.AccessDenied("remote cluster has same name as trusted cluster: %v", remoteClusterName)
-	} else if !trace.IsNotFound(err) {
-		return nil, trace.Wrap(err)
-	}
 
 	remoteCluster, err := types.NewRemoteCluster(remoteClusterName)
 	if err != nil {
@@ -522,15 +497,8 @@ func (a *Server) validateTrustedCluster(ctx context.Context, validateRequest *au
 	}
 	remoteCluster.SetConnectionStatus(teleport.RemoteClusterStatusOffline)
 
-	_, err = a.CreateRemoteCluster(ctx, remoteCluster)
-	if err != nil {
-		if !trace.IsAlreadyExists(err) {
-			return nil, trace.Wrap(err)
-		}
-	}
-
-	err = a.UpsertCertAuthority(ctx, remoteCA)
-	if err != nil {
+	_, err = a.CreateRemoteClusterInternal(ctx, remoteCluster, []types.CertAuthority{remoteCA})
+	if err != nil && !trace.IsAlreadyExists(err) {
 		return nil, trace.Wrap(err)
 	}
 
@@ -641,36 +609,6 @@ func (a *Server) sendValidateRequestToProxy(host string, validateRequest *authcl
 	return validateResponse, nil
 }
 
-// activateCertAuthority will activate both the user and host certificate
-// authority given in the services.TrustedCluster resource.
-func (a *Server) activateCertAuthority(ctx context.Context, t types.TrustedCluster) error {
-	return trace.Wrap(a.ActivateCertAuthorities(ctx, []types.CertAuthID{
-		{
-			Type:       types.UserCA,
-			DomainName: t.GetName(),
-		},
-		{
-			Type:       types.HostCA,
-			DomainName: t.GetName(),
-		},
-	}...))
-}
-
-// deactivateCertAuthority will deactivate both the user and host certificate
-// authority given in the services.TrustedCluster resource.
-func (a *Server) deactivateCertAuthority(ctx context.Context, t types.TrustedCluster) error {
-	return trace.Wrap(a.DeactivateCertAuthorities(ctx, []types.CertAuthID{
-		{
-			Type:       types.UserCA,
-			DomainName: t.GetName(),
-		},
-		{
-			Type:       types.HostCA,
-			DomainName: t.GetName(),
-		},
-	}...))
-}
-
 // createReverseTunnel will create a services.ReverseTunnel givenin the
 // services.TrustedCluster resource.
 func (a *Server) createReverseTunnel(ctx context.Context, t types.TrustedCluster) error {
diff --git a/lib/auth/trustedcluster_test.go b/lib/auth/trustedcluster_test.go
index ba7ffac769b62..f1581dbc64fee 100644
--- a/lib/auth/trustedcluster_test.go
+++ b/lib/auth/trustedcluster_test.go
@@ -469,22 +469,11 @@ func TestUpsertTrustedCluster(t *testing.T) {
 		})
 	require.NoError(t, err)
 
-	leafClusterCA := types.CertAuthority(suite.NewTestCA(types.HostCA, "trustedcluster"))
-	_, err = a.validateTrustedCluster(ctx, &authclient.ValidateTrustedClusterRequest{
-		Token:           validToken,
-		CAs:             []types.CertAuthority{leafClusterCA},
-		TeleportVersion: teleport.Version,
-	})
-	require.NoError(t, err)
-
-	_, err = a.Services.UpsertTrustedCluster(ctx, trustedCluster)
-	require.NoError(t, err)
-
 	ca := suite.NewTestCA(types.UserCA, "trustedcluster")
-	err = a.addCertAuthorities(ctx, trustedCluster, []types.CertAuthority{ca})
-	require.NoError(t, err)
 
-	err = a.UpsertCertAuthority(ctx, ca)
+	configureCAsForTrustedCluster(trustedCluster, []types.CertAuthority{ca})
+
+	_, err = a.Services.CreateTrustedCluster(ctx, trustedCluster, []types.CertAuthority{ca})
 	require.NoError(t, err)
 
 	err = a.createReverseTunnel(ctx, trustedCluster)
diff --git a/lib/services/local/trust.go b/lib/services/local/trust.go
index 72d2979dba675..2a2e454cdcb19 100644
--- a/lib/services/local/trust.go
+++ b/lib/services/local/trust.go
@@ -20,7 +20,6 @@ package local
 
 import (
 	"context"
-	"encoding/json"
 	"errors"
 	"log/slog"
 	"slices"
@@ -67,44 +66,164 @@ func (s *CA) CreateCertAuthority(ctx context.Context, ca types.CertAuthority) er
 
 // CreateCertAuthorities creates multiple cert authorities atomically.
 func (s *CA) CreateCertAuthorities(ctx context.Context, cas ...types.CertAuthority) (revision string, err error) {
-	var condacts []backend.ConditionalAction
-	var clusterNames []string
-	for _, ca := range cas {
-		if !slices.Contains(clusterNames, ca.GetName()) {
-			clusterNames = append(clusterNames, ca.GetName())
+	condacts, err := createCertAuthoritiesCondActs(cas, true /* active */)
+	if err != nil {
+		return "", trace.Wrap(err)
+	}
+
+	rev, err := s.AtomicWrite(ctx, condacts)
+	if err != nil {
+		if errors.Is(err, backend.ErrConditionFailed) {
+			var clusterNames []string
+			for _, ca := range cas {
+				if slices.Contains(clusterNames, ca.GetClusterName()) {
+					continue
+				}
+				clusterNames = append(clusterNames, ca.GetClusterName())
+			}
+			return "", trace.AlreadyExists("one or more CAs from cluster(s) %q already exist", strings.Join(clusterNames, ","))
 		}
+		return "", trace.Wrap(err)
+	}
+
+	return rev, nil
+}
+
+// createCertAuthoritiesCondActs sets up conditional actions for creating a set of CAs.
+func createCertAuthoritiesCondActs(cas []types.CertAuthority, active bool) ([]backend.ConditionalAction, error) {
+	condacts := make([]backend.ConditionalAction, 0, len(cas)*2)
+	for _, ca := range cas {
 		if err := services.ValidateCertAuthority(ca); err != nil {
-			return "", trace.Wrap(err)
+			return nil, trace.Wrap(err)
 		}
 
 		item, err := caToItem(backend.Key{}, ca)
 		if err != nil {
-			return "", trace.Wrap(err)
+			return nil, trace.Wrap(err)
 		}
 
-		condacts = append(condacts, []backend.ConditionalAction{
-			{
-				Key:       activeCAKey(ca.GetID()),
-				Condition: backend.NotExists(),
-				Action:    backend.Put(item),
-			},
-			{
-				Key:       inactiveCAKey(ca.GetID()),
-				Condition: backend.Whatever(),
-				Action:    backend.Delete(),
-			},
-		}...)
+		if active {
+			// for an enabled tc, we perform a conditional create for the active CA key
+			// and an unconditional delete for the inactive CA key since the active range
+			// is given priority over the inactive range.
+			condacts = append(condacts, []backend.ConditionalAction{
+				{
+					Key:       activeCAKey(ca.GetID()),
+					Condition: backend.NotExists(),
+					Action:    backend.Put(item),
+				},
+				{
+					Key:       inactiveCAKey(ca.GetID()),
+					Condition: backend.Whatever(),
+					Action:    backend.Delete(),
+				},
+			}...)
+		} else {
+			// for a disabled tc, we perform a conditional create for the inactive CA key
+			// and assert the non-existence of the active CA key.
+			condacts = append(condacts, []backend.ConditionalAction{
+				{
+					Key:       inactiveCAKey(ca.GetID()),
+					Condition: backend.NotExists(),
+					Action:    backend.Put(item),
+				},
+				{
+					Key:       activeCAKey(ca.GetID()),
+					Condition: backend.NotExists(),
+					Action:    backend.Nop(),
+				},
+			}...)
+		}
 	}
 
-	rev, err := s.AtomicWrite(ctx, condacts)
-	if err != nil {
-		if errors.Is(err, backend.ErrConditionFailed) {
-			return "", trace.AlreadyExists("one or more CAs from cluster(s) %q already exist", strings.Join(clusterNames, ","))
+	return condacts, nil
+}
+
+func updateCertAuthoritiesCondActs(cas []types.CertAuthority, active bool, currentlyActive bool) ([]backend.ConditionalAction, error) {
+	condacts := make([]backend.ConditionalAction, 0, len(cas)*2)
+	for _, ca := range cas {
+		if err := services.ValidateCertAuthority(ca); err != nil {
+			return nil, trace.Wrap(err)
+		}
+
+		item, err := caToItem(backend.Key{}, ca)
+		if err != nil {
+			return nil, trace.Wrap(err)
+		}
+
+		if active {
+			if currentlyActive {
+				// we are updating an active CA without changing its active status. we want to perform
+				// a conditional update on the active CA key and an unconditional delete on the inactive
+				// CA key in order to correctly model active range priority.
+				condacts = append(condacts, []backend.ConditionalAction{
+					{
+						Key:       activeCAKey(ca.GetID()),
+						Condition: backend.Revision(item.Revision),
+						Action:    backend.Put(item),
+					},
+					{
+						Key:       inactiveCAKey(ca.GetID()),
+						Condition: backend.Whatever(),
+						Action:    backend.Delete(),
+					},
+				}...)
+			} else {
+				// we are updating a currently inactive CA to the active state. we want to perform
+				// a create on the active CA key and a revision-conditional delete on the inactive CA key
+						// to effect a "move-and-update" that respects the active range priority.
+				condacts = append(condacts, []backend.ConditionalAction{
+					{
+						Key:       activeCAKey(ca.GetID()),
+						Condition: backend.NotExists(),
+						Action:    backend.Put(item),
+					},
+					{
+						Key:       inactiveCAKey(ca.GetID()),
+						Condition: backend.Revision(item.Revision),
+						Action:    backend.Delete(),
+					},
+				}...)
+			}
+		} else {
+			if currentlyActive {
+				// we are updating an active CA to the inactive state. we want to perform a conditional
+				// delete on the active CA key and an unconditional put on the inactive CA key to
+				// effect a "move-and-update" that respects the active range priority.
+				condacts = append(condacts, []backend.ConditionalAction{
+					{
+						Key:       activeCAKey(ca.GetID()),
+						Condition: backend.Revision(item.Revision),
+						Action:    backend.Delete(),
+					},
+					{
+						Key:       inactiveCAKey(ca.GetID()),
+						Condition: backend.Whatever(),
+						Action:    backend.Put(item),
+					},
+				}...)
+
+			} else {
+				// we are updating an inactive CA without changing its active status. we want to perform
+				// a conditional update on the inactive CA key and assert the non-existence of the active
+				// CA key.
+				condacts = append(condacts, []backend.ConditionalAction{
+					{
+						Key:       inactiveCAKey(ca.GetID()),
+						Condition: backend.Revision(item.Revision),
+						Action:    backend.Put(item),
+					},
+					{
+						Key:       activeCAKey(ca.GetID()),
+						Condition: backend.NotExists(),
+						Action:    backend.Nop(),
+					},
+				}...)
+			}
 		}
-		return "", trace.Wrap(err)
 	}
 
-	return rev, nil
+	return condacts, nil
 }
 
 // UpsertCertAuthority updates or inserts a new certificate authority
@@ -198,10 +317,15 @@ func (s *CA) DeleteCertAuthority(ctx context.Context, id types.CertAuthID) error
 
 // DeleteCertAuthorities deletes multiple cert authorities atomically.
 func (s *CA) DeleteCertAuthorities(ctx context.Context, ids ...types.CertAuthID) error {
+	_, err := s.AtomicWrite(ctx, s.deleteCertAuthoritiesCondActs(ids))
+	return trace.Wrap(err)
+}
+
+func (s *CA) deleteCertAuthoritiesCondActs(ids []types.CertAuthID) []backend.ConditionalAction {
 	var condacts []backend.ConditionalAction
 	for _, id := range ids {
 		if err := id.Check(); err != nil {
-			return trace.Wrap(err)
+			continue
 		}
 		for _, key := range []backend.Key{activeCAKey(id), inactiveCAKey(id)} {
 			condacts = append(condacts, backend.ConditionalAction{
@@ -211,9 +335,7 @@ func (s *CA) DeleteCertAuthorities(ctx context.Context, ids ...types.CertAuthID)
 			})
 		}
 	}
-
-	_, err := s.AtomicWrite(ctx, condacts)
-	return trace.Wrap(err)
+	return condacts
 }
 
 // ActivateCertAuthority moves a CertAuthority from the deactivated list to
@@ -325,10 +447,26 @@ func (s *CA) DeactivateCertAuthorities(ctx context.Context, ids ...types.CertAut
 // GetCertAuthority returns certificate authority by given id. Parameter loadSigningKeys
 // controls if signing keys are loaded
 func (s *CA) GetCertAuthority(ctx context.Context, id types.CertAuthID, loadSigningKeys bool) (types.CertAuthority, error) {
+	return s.getCertAuthority(ctx, id, loadSigningKeys, true /* active */)
+}
+
+// GetInactiveCertAuthority returns inactive certificate authority by given id. Parameter loadSigningKeys
+// controls if signing keys are loaded.
+func (s *CA) GetInactiveCertAuthority(ctx context.Context, id types.CertAuthID, loadSigningKeys bool) (types.CertAuthority, error) {
+	return s.getCertAuthority(ctx, id, loadSigningKeys, false /* active */)
+}
+
+func (s *CA) getCertAuthority(ctx context.Context, id types.CertAuthID, loadSigningKeys bool, active bool) (types.CertAuthority, error) {
 	if err := id.Check(); err != nil {
 		return nil, trace.Wrap(err)
 	}
-	item, err := s.Get(ctx, activeCAKey(id))
+
+	key := activeCAKey(id)
+	if !active {
+		key = inactiveCAKey(id)
+	}
+
+	item, err := s.Get(ctx, key)
 	if err != nil {
 		return nil, trace.Wrap(err)
 	}
@@ -425,25 +563,135 @@ func (s *CA) UpdateUserCARoleMap(ctx context.Context, name string, roleMap types
 	return nil
 }
 
+// CreateTrustedCluster atomically creates a new trusted cluster along with associated resources.
+func (s *CA) CreateTrustedCluster(ctx context.Context, tc types.TrustedCluster, cas []types.CertAuthority) (revision string, err error) {
+	if err := services.ValidateTrustedCluster(tc); err != nil {
+		return "", trace.Wrap(err)
+	}
+
+	item, err := trustedClusterToItem(tc)
+	if err != nil {
+		return "", trace.Wrap(err)
+	}
+
+	condacts := []backend.ConditionalAction{
+		{
+			Key:       item.Key,
+			Condition: backend.NotExists(),
+			Action:    backend.Put(item),
+		},
+		// also assert that no remote cluster exists by this name, as
+		// we currently do not allow for a trusted cluster and remote
+		// cluster to share a name (CAs end up stored at the same location).
+		{
+			Key:       remoteClusterKey(tc.GetName()),
+			Condition: backend.NotExists(),
+			Action:    backend.Nop(),
+		},
+	}
+
+	// perform some initial trusted-cluster related validation. common ca validation is handled later
+	// on by the createCertAuthoritiesCondActs helper.
+	for _, ca := range cas {
+		if tc.GetName() != ca.GetClusterName() {
+			return "", trace.BadParameter("trusted cluster name %q does not match CA cluster name %q", tc.GetName(), ca.GetClusterName())
+		}
+	}
+
+	ccas, err := createCertAuthoritiesCondActs(cas, tc.GetEnabled())
+	if err != nil {
+		return "", trace.Wrap(err)
+	}
+
+	condacts = append(condacts, ccas...)
+
+	rev, err := s.AtomicWrite(ctx, condacts)
+	if err != nil {
+		if errors.Is(err, backend.ErrConditionFailed) {
+			if _, err := s.GetRemoteCluster(ctx, tc.GetName()); err == nil {
+				return "", trace.BadParameter("cannot create trusted cluster with same name as remote cluster %q, bidirectional trust is not supported", tc.GetName())
+			}
+
+			return "", trace.AlreadyExists("trusted cluster %q and/or one or more of its cert authorities already exists", tc.GetName())
+		}
+		return "", trace.Wrap(err)
+	}
+
+	return rev, nil
+}
+
+// UpdateTrustedCluster atomically updates a trusted cluster along with associated resources.
+func (s *CA) UpdateTrustedCluster(ctx context.Context, tc types.TrustedCluster, cas []types.CertAuthority) (revision string, err error) {
+	if err := services.ValidateTrustedCluster(tc); err != nil {
+		return "", trace.Wrap(err)
+	}
+
+	// fetch the current state. we'll need this later on to correctly construct our CA condacts, and
+	// it doesn't hurt to reject mismatched revisions early.
+	extant, err := s.GetTrustedCluster(ctx, tc.GetName())
+	if err != nil {
+		return "", trace.Wrap(err)
+	}
+
+	if tc.GetRevision() != extant.GetRevision() {
+		return "", trace.CompareFailed("trusted cluster %q has been modified, please retry", tc.GetName())
+	}
+
+	item, err := trustedClusterToItem(tc)
+	if err != nil {
+		return "", trace.Wrap(err)
+	}
+
+	condacts := []backend.ConditionalAction{
+		{
+			Key:       item.Key,
+			Condition: backend.Revision(item.Revision),
+			Action:    backend.Put(item),
+		},
+	}
+
+	// perform some initial trusted-cluster related validation. common ca validation is handled later
+	// on by the createCertAuthoritiesCondActs helper.
+	for _, ca := range cas {
+		if tc.GetName() != ca.GetClusterName() {
+			return "", trace.BadParameter("trusted cluster name %q does not match CA cluster name %q", tc.GetName(), ca.GetClusterName())
+		}
+	}
+
+	ccas, err := updateCertAuthoritiesCondActs(cas, tc.GetEnabled(), extant.GetEnabled())
+	if err != nil {
+		return "", trace.Wrap(err)
+	}
+
+	condacts = append(condacts, ccas...)
+
+	rev, err := s.AtomicWrite(ctx, condacts)
+	if err != nil {
+		if errors.Is(err, backend.ErrConditionFailed) {
+			return "", trace.CompareFailed("trusted cluster %q and/or one or more of its cert authorities have been modified, please retry", tc.GetName())
+		}
+		return "", trace.Wrap(err)
+	}
+
+	return rev, nil
+}
+
 // UpsertTrustedCluster creates or updates a TrustedCluster in the backend.
 func (s *CA) UpsertTrustedCluster(ctx context.Context, trustedCluster types.TrustedCluster) (types.TrustedCluster, error) {
 	if err := services.ValidateTrustedCluster(trustedCluster); err != nil {
 		return nil, trace.Wrap(err)
 	}
-	rev := trustedCluster.GetRevision()
-	value, err := services.MarshalTrustedCluster(trustedCluster)
+
+	item, err := trustedClusterToItem(trustedCluster)
 	if err != nil {
 		return nil, trace.Wrap(err)
 	}
-	_, err = s.Put(ctx, backend.Item{
-		Key:      backend.NewKey(trustedClustersPrefix, trustedCluster.GetName()),
-		Value:    value,
-		Expires:  trustedCluster.Expiry(),
-		Revision: rev,
-	})
+
+	_, err = s.Put(ctx, item)
 	if err != nil {
 		return nil, trace.Wrap(err)
 	}
+
 	return trustedCluster, nil
 }
 
@@ -482,16 +730,44 @@ func (s *CA) GetTrustedClusters(ctx context.Context) ([]types.TrustedCluster, er
 
 // DeleteTrustedCluster removes a TrustedCluster from the backend by name.
 func (s *CA) DeleteTrustedCluster(ctx context.Context, name string) error {
+	return s.DeleteTrustedClusterInternal(ctx, name, nil /* no cert authorities */)
+}
+
+// DeleteTrustedClusterInternal removes a trusted cluster and associated resources atomically.
+func (s *CA) DeleteTrustedClusterInternal(ctx context.Context, name string, caIDs []types.CertAuthID) error {
 	if name == "" {
 		return trace.BadParameter("missing trusted cluster name")
 	}
-	err := s.Delete(ctx, backend.NewKey(trustedClustersPrefix, name))
-	if err != nil {
-		if trace.IsNotFound(err) {
+
+	for _, id := range caIDs {
+		if err := id.Check(); err != nil {
+			return trace.Wrap(err)
+		}
+
+		if id.DomainName != name {
+			return trace.BadParameter("ca %q does not belong to trusted cluster %q", id.DomainName, name)
+		}
+	}
+
+	condacts := []backend.ConditionalAction{
+		{
+			Key:       backend.NewKey(trustedClustersPrefix, name),
+			Condition: backend.Exists(),
+			Action:    backend.Delete(),
+		},
+	}
+
+	condacts = append(condacts, s.deleteCertAuthoritiesCondActs(caIDs)...)
+
+	if _, err := s.AtomicWrite(ctx, condacts); err != nil {
+		if errors.Is(err, backend.ErrConditionFailed) {
 			return trace.NotFound("trusted cluster %q is not found", name)
 		}
+
+		return trace.Wrap(err)
 	}
-	return trace.Wrap(err)
+
+	return nil
 }
 
 // UpsertTunnelConnection updates or creates tunnel connection
@@ -608,25 +884,71 @@ func (s *CA) DeleteAllTunnelConnections() error {
 	return trace.Wrap(err)
 }
 
-// CreateRemoteCluster creates remote cluster
-func (s *CA) CreateRemoteCluster(
-	ctx context.Context, rc types.RemoteCluster,
-) (types.RemoteCluster, error) {
-	value, err := json.Marshal(rc)
+// CreateRemoteCluster creates a remote cluster
+func (s *CA) CreateRemoteCluster(ctx context.Context, rc types.RemoteCluster) (types.RemoteCluster, error) {
+	rev, err := s.CreateRemoteClusterInternal(ctx, rc, nil)
 	if err != nil {
 		return nil, trace.Wrap(err)
 	}
-	item := backend.Item{
-		Key:     backend.NewKey(remoteClustersPrefix, rc.GetName()),
-		Value:   value,
-		Expires: rc.Expiry(),
+
+	rc.SetRevision(rev)
+	return rc, nil
+}
+
+// CreateRemoteClusterInternal atomically creates a new remote cluster along with associated resources.
+func (s *CA) CreateRemoteClusterInternal(ctx context.Context, rc types.RemoteCluster, cas []types.CertAuthority) (revision string, err error) {
+	if err := services.CheckAndSetDefaults(rc); err != nil {
+		return "", trace.Wrap(err)
 	}
-	lease, err := s.Create(ctx, item)
+
+	item, err := remoteClusterToItem(rc)
 	if err != nil {
-		return nil, trace.Wrap(err)
+		return "", trace.Wrap(err)
 	}
-	rc.SetRevision(lease.Revision)
-	return rc, nil
+
+	condacts := []backend.ConditionalAction{
+		{
+			Key:       item.Key,
+			Condition: backend.NotExists(),
+			Action:    backend.Put(item),
+		},
+		// also assert that no trusted cluster exists by this name, as
+		// we currently do not allow for a trusted cluster and remote
+		// cluster to share a name (CAs end up stored at the same location).
+		{
+			Key:       trustedClusterKey(rc.GetName()),
+			Condition: backend.NotExists(),
+			Action:    backend.Nop(),
+		},
+	}
+
+	// perform some initial remote-cluster related validation. common ca validation is handled later
+	// on by the createCertAuthoritiesCondActs helper.
+	for _, ca := range cas {
+		if rc.GetName() != ca.GetClusterName() {
+			return "", trace.BadParameter("remote cluster name %q does not match CA cluster name %q", rc.GetName(), ca.GetClusterName())
+		}
+	}
+
+	ccas, err := createCertAuthoritiesCondActs(cas, true /* remote cluster cas always considered active */)
+	if err != nil {
+		return "", trace.Wrap(err)
+	}
+
+	condacts = append(condacts, ccas...)
+
+	rev, err := s.AtomicWrite(ctx, condacts)
+	if err != nil {
+		if errors.Is(err, backend.ErrConditionFailed) {
+			if _, err := s.GetTrustedCluster(ctx, rc.GetName()); err == nil {
+				return "", trace.BadParameter("cannot create remote cluster with same name as trusted cluster %q, bidirectional trust is not supported", rc.GetName())
+			}
+			return "", trace.AlreadyExists("remote cluster %q and/or one or more of its cert authorities already exists", rc.GetName())
+		}
+		return "", trace.Wrap(err)
+	}
+
+	return rev, nil
 }
 
 // UpdateRemoteCluster updates selected remote cluster fields: expiry and labels
@@ -652,17 +974,12 @@ func (s *CA) UpdateRemoteCluster(ctx context.Context, rc types.RemoteCluster) (t
 		existing.SetConnectionStatus(rc.GetConnectionStatus())
 		existing.SetMetadata(rc.GetMetadata())
 
-		updateValue, err := services.MarshalRemoteCluster(existing)
+		item, err := remoteClusterToItem(existing)
 		if err != nil {
 			return nil, trace.Wrap(err)
 		}
 
-		lease, err := s.ConditionalUpdate(ctx, backend.Item{
-			Key:      backend.NewKey(remoteClustersPrefix, existing.GetName()),
-			Value:    updateValue,
-			Expires:  existing.Expiry(),
-			Revision: existing.GetRevision(),
-		})
+		lease, err := s.ConditionalUpdate(ctx, item)
 		if err != nil {
 			if trace.IsCompareFailed(err) {
 				// Retry!
@@ -707,17 +1024,12 @@ func (s *CA) PatchRemoteCluster(
 			return nil, trace.BadParameter("metadata.revision: cannot be patched")
 		}
 
-		updatedValue, err := services.MarshalRemoteCluster(updated)
+		item, err := remoteClusterToItem(updated)
 		if err != nil {
 			return nil, trace.Wrap(err)
 		}
 
-		lease, err := s.ConditionalUpdate(ctx, backend.Item{
-			Key:      backend.NewKey(remoteClustersPrefix, name),
-			Value:    updatedValue,
-			Expires:  updated.Expiry(),
-			Revision: updated.GetRevision(),
-		})
+		lease, err := s.ConditionalUpdate(ctx, item)
 		if err != nil {
 			if trace.IsCompareFailed(err) {
 				// Retry!
@@ -822,13 +1134,44 @@ func (s *CA) GetRemoteCluster(
 }
 
 // DeleteRemoteCluster deletes remote cluster by name
-func (s *CA) DeleteRemoteCluster(
-	ctx context.Context, clusterName string,
-) error {
-	if clusterName == "" {
+func (s *CA) DeleteRemoteCluster(ctx context.Context, clusterName string) error {
+	return s.DeleteRemoteClusterInternal(ctx, clusterName, nil /* no cert authorities */)
+}
+
+// DeleteRemoteClusterInternal atomically deletes a remote cluster along with associated resources.
+func (s *CA) DeleteRemoteClusterInternal(ctx context.Context, name string, ids []types.CertAuthID) error {
+	if name == "" {
 		return trace.BadParameter("missing parameter cluster name")
 	}
-	return s.Delete(ctx, backend.NewKey(remoteClustersPrefix, clusterName))
+
+	for _, id := range ids {
+		if err := id.Check(); err != nil {
+			return trace.Wrap(err)
+		}
+
+		if id.DomainName != name {
+			return trace.BadParameter("ca %q does not belong to remote cluster %q", id.DomainName, name)
+		}
+	}
+
+	condacts := []backend.ConditionalAction{
+		{
+			Key:       remoteClusterKey(name),
+			Condition: backend.Exists(),
+			Action:    backend.Delete(),
+		},
+	}
+
+	condacts = append(condacts, s.deleteCertAuthoritiesCondActs(ids)...)
+
+	if _, err := s.AtomicWrite(ctx, condacts); err != nil {
+		if errors.Is(err, backend.ErrConditionFailed) {
+			return trace.NotFound("remote cluster %q is not found", name)
+		}
+		return trace.Wrap(err)
+	}
+
+	return nil
 }
 
 // DeleteAllRemoteClusters deletes all remote clusters
@@ -853,6 +1196,42 @@ func caToItem(key backend.Key, ca types.CertAuthority) (backend.Item, error) {
 	}, nil
 }
 
+func trustedClusterToItem(tc types.TrustedCluster) (backend.Item, error) {
+	value, err := services.MarshalTrustedCluster(tc)
+	if err != nil {
+		return backend.Item{}, trace.Wrap(err)
+	}
+
+	return backend.Item{
+		Key:      trustedClusterKey(tc.GetName()),
+		Value:    value,
+		Expires:  tc.Expiry(),
+		Revision: tc.GetRevision(),
+	}, nil
+}
+
+func trustedClusterKey(name string) backend.Key {
+	return backend.NewKey(trustedClustersPrefix, name)
+}
+
+func remoteClusterToItem(rc types.RemoteCluster) (backend.Item, error) {
+	value, err := services.MarshalRemoteCluster(rc)
+	if err != nil {
+		return backend.Item{}, trace.Wrap(err)
+	}
+
+	return backend.Item{
+		Key:      remoteClusterKey(rc.GetName()),
+		Value:    value,
+		Expires:  rc.Expiry(),
+		Revision: rc.GetRevision(),
+	}, nil
+}
+
+func remoteClusterKey(name string) backend.Key {
+	return backend.NewKey(remoteClustersPrefix, name)
+}
+
 // activeCAKey builds the active key variant for the supplied ca id.
 func activeCAKey(id types.CertAuthID) backend.Key {
 	return backend.NewKey(authoritiesPrefix, string(id.Type), id.DomainName)
diff --git a/lib/services/local/trust_test.go b/lib/services/local/trust_test.go
index 34a85171d4887..3188c546e6c16 100644
--- a/lib/services/local/trust_test.go
+++ b/lib/services/local/trust_test.go
@@ -20,6 +20,7 @@ package local
 
 import (
 	"context"
+	"crypto/x509/pkix"
 	"fmt"
 	"testing"
 	"time"
@@ -32,11 +33,205 @@ import (
 
 	"github.com/gravitational/teleport"
 	"github.com/gravitational/teleport/api/types"
+	"github.com/gravitational/teleport/lib/auth/testauthority"
 	"github.com/gravitational/teleport/lib/backend"
 	"github.com/gravitational/teleport/lib/backend/lite"
 	"github.com/gravitational/teleport/lib/backend/memory"
+	"github.com/gravitational/teleport/lib/tlsca"
 )
 
+func TestUpdateCertAuthorityCondActs(t *testing.T) {
+	t.Parallel()
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	// setup closure creates our initial state and returns its components
+	setup := func(active bool) (types.TrustedCluster, types.CertAuthority, *CA) {
+		bk, err := memory.New(memory.Config{})
+		require.NoError(t, err)
+		t.Cleanup(func() { require.NoError(t, bk.Close()) })
+		service := NewCAService(bk)
+
+		tc, err := types.NewTrustedCluster("tc", types.TrustedClusterSpecV2{
+			Enabled:              active,
+			Roles:                []string{"rrr"},
+			Token:                "xxx",
+			ProxyAddress:         "xxx",
+			ReverseTunnelAddress: "xxx",
+		})
+		require.NoError(t, err)
+
+		ca := newCertAuthority(t, types.HostCA, "tc")
+		revision, err := service.CreateTrustedCluster(ctx, tc, []types.CertAuthority{ca})
+		require.NoError(t, err)
+		tc.SetRevision(revision)
+		ca.SetRevision(revision)
+		return tc, ca, service
+	}
+
+	// putCA is a helper for injecting a CA into the backend, bypassing atomic condition protections
+	putCA := func(ctx context.Context, service *CA, ca types.CertAuthority, active bool) {
+		key := activeCAKey(ca.GetID())
+		if !active {
+			key = inactiveCAKey(ca.GetID())
+		}
+		item, err := caToItem(key, ca)
+		require.NoError(t, err)
+		_, err = service.Put(ctx, item)
+		require.NoError(t, err)
+	}
+
+	// delCA is a helper for deleting a CA from the backend, bypassing atomic condition protections
+	delCA := func(ctx context.Context, service *CA, ca types.CertAuthority, active bool) {
+		key := activeCAKey(ca.GetID())
+		if !active {
+			key = inactiveCAKey(ca.GetID())
+		}
+		require.NoError(t, service.Delete(ctx, key))
+	}
+
+	// -- update active in place ---
+	tc, ca, service := setup(true /* active */)
+
+	// verify basic update works
+	tc.SetRoles([]string{"rrr", "zzz"})
+	revision, err := service.UpdateTrustedCluster(ctx, tc, []types.CertAuthority{ca})
+	require.NoError(t, err)
+	tc.SetRevision(revision)
+	ca.SetRevision(revision)
+
+	gotTC, err := service.GetTrustedCluster(ctx, tc.GetName())
+	require.NoError(t, err)
+	require.Empty(t, cmp.Diff(tc, gotTC, cmpopts.IgnoreFields(types.Metadata{}, "Revision")))
+	_, err = service.GetCertAuthority(ctx, ca.GetID(), true)
+	require.NoError(t, err)
+	_, err = service.GetInactiveCertAuthority(ctx, ca.GetID(), true)
+	require.True(t, trace.IsNotFound(err), "err=%v", err)
+
+	// verify that an inactive CA doesn't prevent update
+	putCA(ctx, service, ca, false /* inactive */)
+	tc.SetRoles([]string{"rrr", "zzz", "aaa"})
+	revision, err = service.UpdateTrustedCluster(ctx, tc, []types.CertAuthority{ca})
+	require.NoError(t, err)
+	tc.SetRevision(revision)
+	ca.SetRevision(revision)
+
+	gotTC, err = service.GetTrustedCluster(ctx, tc.GetName())
+	require.NoError(t, err)
+	require.Empty(t, cmp.Diff(tc, gotTC, cmpopts.IgnoreFields(types.Metadata{}, "Revision")))
+	_, err = service.GetCertAuthority(ctx, ca.GetID(), true)
+	require.NoError(t, err)
+	_, err = service.GetInactiveCertAuthority(ctx, ca.GetID(), true)
+	require.True(t, trace.IsNotFound(err), "err=%v", err)
+
+	// verify that concurrent update of the active CA causes update to fail
+	putCA(ctx, service, ca, true /* active */)
+	tc.SetRoles([]string{"rrr", "zzz", "aaa", "bbb"})
+	_, err = service.UpdateTrustedCluster(ctx, tc, []types.CertAuthority{ca})
+	require.True(t, trace.IsCompareFailed(err), "err=%v", err)
+
+	// --- update inactive in place ---
+	tc, ca, service = setup(false /* inactive */)
+
+	// verify basic update works
+	tc.SetRoles([]string{"rrr", "zzz"})
+	revision, err = service.UpdateTrustedCluster(ctx, tc, []types.CertAuthority{ca})
+	require.NoError(t, err)
+	tc.SetRevision(revision)
+	ca.SetRevision(revision)
+
+	gotTC, err = service.GetTrustedCluster(ctx, tc.GetName())
+	require.NoError(t, err)
+	require.Empty(t, cmp.Diff(tc, gotTC, cmpopts.IgnoreFields(types.Metadata{}, "Revision")))
+	_, err = service.GetCertAuthority(ctx, ca.GetID(), true)
+	require.True(t, trace.IsNotFound(err), "err=%v", err)
+	_, err = service.GetInactiveCertAuthority(ctx, ca.GetID(), true)
+	require.NoError(t, err)
+
+	// verify that an active CA prevents update
+	putCA(ctx, service, ca, true /* active */)
+	tc.SetRoles([]string{"rrr", "zzz", "aaa"})
+	_, err = service.UpdateTrustedCluster(ctx, tc, []types.CertAuthority{ca})
+	require.True(t, trace.IsCompareFailed(err), "err=%v", err)
+	delCA(ctx, service, ca, true /* active */)
+
+	// verify that concurrent update of the inactive CA causes update to fail
+	putCA(ctx, service, ca, false /* inactive */)
+	tc.SetRoles([]string{"rrr", "zzz", "aaa", "bbb"})
+	_, err = service.UpdateTrustedCluster(ctx, tc, []types.CertAuthority{ca})
+	require.True(t, trace.IsCompareFailed(err), "err=%v", err)
+
+	// --- activate/deactivate ---
+	tc, ca, service = setup(false /* inactive */)
+
+	// verify that activating works
+	tc.SetEnabled(true)
+	revision, err = service.UpdateTrustedCluster(ctx, tc, []types.CertAuthority{ca})
+	require.NoError(t, err)
+	tc.SetRevision(revision)
+	ca.SetRevision(revision)
+
+	gotTC, err = service.GetTrustedCluster(ctx, tc.GetName())
+	require.NoError(t, err)
+	require.Empty(t, cmp.Diff(tc, gotTC, cmpopts.IgnoreFields(types.Metadata{}, "Revision")))
+	_, err = service.GetCertAuthority(ctx, ca.GetID(), true)
+	require.NoError(t, err)
+	_, err = service.GetInactiveCertAuthority(ctx, ca.GetID(), true)
+	require.True(t, trace.IsNotFound(err), "err=%v", err)
+
+	// verify that deactivating works
+	tc.SetEnabled(false)
+	revision, err = service.UpdateTrustedCluster(ctx, tc, []types.CertAuthority{ca})
+	require.NoError(t, err)
+	tc.SetRevision(revision)
+	ca.SetRevision(revision)
+
+	gotTC, err = service.GetTrustedCluster(ctx, tc.GetName())
+	require.NoError(t, err)
+	require.Empty(t, cmp.Diff(tc, gotTC, cmpopts.IgnoreFields(types.Metadata{}, "Revision")))
+	_, err = service.GetCertAuthority(ctx, ca.GetID(), true)
+	require.True(t, trace.IsNotFound(err), "err=%v", err)
+	_, err = service.GetInactiveCertAuthority(ctx, ca.GetID(), true)
+	require.NoError(t, err)
+
+	// verify that an active CA conflicts with activation
+	putCA(ctx, service, ca, true /* active */)
+	tc.SetEnabled(true)
+	_, err = service.UpdateTrustedCluster(ctx, tc, []types.CertAuthority{ca})
+	require.True(t, trace.IsCompareFailed(err), "err=%v", err)
+	delCA(ctx, service, ca, true /* active */)
+
+	// activation should work after deleting conflicting CA
+	revision, err = service.UpdateTrustedCluster(ctx, tc, []types.CertAuthority{ca})
+	require.NoError(t, err)
+	tc.SetRevision(revision)
+	ca.SetRevision(revision)
+
+	gotTC, err = service.GetTrustedCluster(ctx, tc.GetName())
+	require.NoError(t, err)
+	require.Empty(t, cmp.Diff(tc, gotTC, cmpopts.IgnoreFields(types.Metadata{}, "Revision")))
+	_, err = service.GetCertAuthority(ctx, ca.GetID(), true)
+	require.NoError(t, err)
+	_, err = service.GetInactiveCertAuthority(ctx, ca.GetID(), true)
+	require.True(t, trace.IsNotFound(err), "err=%v", err)
+
+	// verify that deactivation works even if there is an inactive CA present
+	putCA(ctx, service, ca, false /* inactive */)
+	tc.SetEnabled(false)
+	revision, err = service.UpdateTrustedCluster(ctx, tc, []types.CertAuthority{ca})
+	require.NoError(t, err)
+	tc.SetRevision(revision)
+	ca.SetRevision(revision)
+
+	gotTC, err = service.GetTrustedCluster(ctx, tc.GetName())
+	require.NoError(t, err)
+	require.Empty(t, cmp.Diff(tc, gotTC, cmpopts.IgnoreFields(types.Metadata{}, "Revision")))
+	_, err = service.GetCertAuthority(ctx, ca.GetID(), true)
+	require.True(t, trace.IsNotFound(err), "err=%v", err)
+	_, err = service.GetInactiveCertAuthority(ctx, ca.GetID(), true)
+	require.NoError(t, err)
+}
+
 func TestRemoteClusterCRUD(t *testing.T) {
 	t.Parallel()
 	ctx := context.Background()
@@ -67,22 +262,38 @@ func TestRemoteClusterCRUD(t *testing.T) {
 	src.SetConnectionStatus(teleport.RemoteClusterStatusOnline)
 	src.SetLastHeartbeat(clock.Now().Add(-time.Hour))
 
-	// create remote clusters
-	gotRC, err := trustService.CreateRemoteCluster(ctx, rc)
+	// set up fake CAs for the remote clusters
+	ca := newCertAuthority(t, types.HostCA, "foo")
+	sca := newCertAuthority(t, types.HostCA, "bar")
+
+	// create remote cluster
+	revision, err := trustService.CreateRemoteClusterInternal(ctx, rc, []types.CertAuthority{ca})
 	require.NoError(t, err)
-	require.Empty(t, cmp.Diff(rc, gotRC, cmpopts.IgnoreFields(types.Metadata{}, "Revision")))
-	gotSRC, err := trustService.CreateRemoteCluster(ctx, src)
+	rc.SetRevision(revision)
+	ca.SetRevision(revision)
+
+	_, err = trustService.CreateRemoteClusterInternal(ctx, rc, []types.CertAuthority{ca})
+	require.True(t, trace.IsAlreadyExists(err), "err=%v", err)
+
+	revision, err = trustService.CreateRemoteClusterInternal(ctx, src, []types.CertAuthority{sca})
 	require.NoError(t, err)
-	require.Empty(t, cmp.Diff(src, gotSRC, cmpopts.IgnoreFields(types.Metadata{}, "Revision")))
+	src.SetRevision(revision)
+	sca.SetRevision(revision)
 
 	// get remote cluster make sure it's correct
-	gotRC, err = trustService.GetRemoteCluster(ctx, "foo")
+	gotRC, err := trustService.GetRemoteCluster(ctx, "foo")
 	require.NoError(t, err)
 	require.Equal(t, "foo", gotRC.GetName())
 	require.Equal(t, teleport.RemoteClusterStatusOffline, gotRC.GetConnectionStatus())
 	require.Equal(t, clock.Now().Nanosecond(), gotRC.GetLastHeartbeat().Nanosecond())
 	require.Equal(t, originalLabels, gotRC.GetMetadata().Labels)
 
+	// get remote cluster CA make sure it's correct
+	gotCA, err := trustService.GetCertAuthority(ctx, ca.GetID(), true)
+	require.NoError(t, err)
+
+	require.Empty(t, cmp.Diff(ca, gotCA, cmpopts.IgnoreFields(types.Metadata{}, "Revision")))
+
 	rc = gotRC
 	updatedLabels := map[string]string{
 		"e": "f",
@@ -99,10 +310,9 @@ func TestRemoteClusterCRUD(t *testing.T) {
 	require.NoError(t, err)
 	require.Empty(t, cmp.Diff(rc, gotRC, cmpopts.IgnoreFields(types.Metadata{}, "Revision")))
 
-	src = gotSRC
 	src.SetConnectionStatus(teleport.RemoteClusterStatusOffline)
 	src.SetLastHeartbeat(clock.Now())
-	gotSRC, err = trustService.UpdateRemoteCluster(ctx, src)
+	gotSRC, err := trustService.UpdateRemoteCluster(ctx, src)
 	require.NoError(t, err)
 	require.Empty(t, cmp.Diff(src, gotSRC, cmpopts.IgnoreFields(types.Metadata{}, "Revision")))
 
@@ -126,13 +336,26 @@ func TestRemoteClusterCRUD(t *testing.T) {
 	require.Len(t, allRC, 2)
 
 	// delete cluster
-	err = trustService.DeleteRemoteCluster(ctx, "foo")
+	err = trustService.DeleteRemoteClusterInternal(ctx, "foo", []types.CertAuthID{ca.GetID()})
 	require.NoError(t, err)
 
 	// make sure it's really gone
-	err = trustService.DeleteRemoteCluster(ctx, "foo")
-	require.Error(t, err)
-	require.ErrorIs(t, err, trace.NotFound(`key "/remoteClusters/foo" is not found`))
+	_, err = trustService.GetRemoteCluster(ctx, "foo")
+	require.True(t, trace.IsNotFound(err))
+	_, err = trustService.GetCertAuthority(ctx, ca.GetID(), true)
+	require.True(t, trace.IsNotFound(err))
+
+	// make sure we can't create trusted clusters with the same name as an extant remote cluster
+	tc, err := types.NewTrustedCluster("bar", types.TrustedClusterSpecV2{
+		Enabled:              true,
+		Roles:                []string{"bar", "baz"},
+		Token:                "qux",
+		ProxyAddress:         "quux",
+		ReverseTunnelAddress: "quuz",
+	})
+	require.NoError(t, err)
+	_, err = trustService.CreateTrustedCluster(ctx, tc, nil)
+	require.True(t, trace.IsBadParameter(err), "err=%v", err)
 }
 
 func TestPresenceService_PatchRemoteCluster(t *testing.T) {
@@ -290,10 +513,13 @@ func TestTrustedClusterCRUD(t *testing.T) {
 	})
 	require.NoError(t, err)
 
+	ca := newCertAuthority(t, types.HostCA, "foo")
+	sca := newCertAuthority(t, types.HostCA, "bar")
+
 	// create trusted clusters
-	_, err = trustService.UpsertTrustedCluster(ctx, tc)
+	_, err = trustService.CreateTrustedCluster(ctx, tc, []types.CertAuthority{ca})
 	require.NoError(t, err)
-	_, err = trustService.UpsertTrustedCluster(ctx, stc)
+	_, err = trustService.CreateTrustedCluster(ctx, stc, []types.CertAuthority{sca})
 	require.NoError(t, err)
 
 	// get trusted cluster make sure it's correct
@@ -306,17 +532,87 @@ func TestTrustedClusterCRUD(t *testing.T) {
 	require.Equal(t, "quux", gotTC.GetProxyAddress())
 	require.Equal(t, "quuz", gotTC.GetReverseTunnelAddress())
 
+	// get trusted cluster CA make sure it's correct
+	gotCA, err := trustService.GetCertAuthority(ctx, ca.GetID(), true)
+	require.NoError(t, err)
+	require.Empty(t, cmp.Diff(ca, gotCA, cmpopts.IgnoreFields(types.Metadata{}, "Revision")))
+
 	// get all clusters
 	allTC, err := trustService.GetTrustedClusters(ctx)
 	require.NoError(t, err)
 	require.Len(t, allTC, 2)
 
+	// verify that enabling/disabling correctly shows/hides CAs
+	tc.SetEnabled(false)
+	tc.SetRevision(gotTC.GetRevision())
+	ca.SetRevision(gotCA.GetRevision())
+	revision, err := trustService.UpdateTrustedCluster(ctx, tc, []types.CertAuthority{ca})
+	require.NoError(t, err)
+	_, err = trustService.GetCertAuthority(ctx, ca.GetID(), true)
+	require.True(t, trace.IsNotFound(err), "err=%v", err)
+
+	_, err = trustService.GetInactiveCertAuthority(ctx, ca.GetID(), true)
+	require.NoError(t, err)
+
+	tc.SetEnabled(true)
+	tc.SetRevision(revision)
+	ca.SetRevision(revision)
+	_, err = trustService.UpdateTrustedCluster(ctx, tc, []types.CertAuthority{ca})
+	require.NoError(t, err)
+
+	_, err = trustService.GetCertAuthority(ctx, ca.GetID(), true)
+	require.NoError(t, err)
+	_, err = trustService.GetInactiveCertAuthority(ctx, ca.GetID(), true)
+	require.True(t, trace.IsNotFound(err), "err=%v", err)
+
 	// delete cluster
-	err = trustService.DeleteTrustedCluster(ctx, "foo")
+	err = trustService.DeleteTrustedClusterInternal(ctx, "foo", []types.CertAuthID{ca.GetID()})
 	require.NoError(t, err)
 
 	// make sure it's really gone
 	_, err = trustService.GetTrustedCluster(ctx, "foo")
-	require.Error(t, err)
-	require.ErrorIs(t, err, trace.NotFound(`key "/trustedclusters/foo" is not found`))
+	require.True(t, trace.IsNotFound(err), "err=%v", err)
+	_, err = trustService.GetCertAuthority(ctx, ca.GetID(), true)
+	require.True(t, trace.IsNotFound(err), "err=%v", err)
+
+	// make sure we can't create remote clusters with the same name as an extant trusted cluster
+	rc, err := types.NewRemoteCluster("bar")
+	require.NoError(t, err)
+	_, err = trustService.CreateRemoteCluster(ctx, rc)
+	require.True(t, trace.IsBadParameter(err), "err=%v", err)
+}
+
+func newCertAuthority(t *testing.T, caType types.CertAuthType, domain string) types.CertAuthority {
+	t.Helper()
+
+	ta := testauthority.New()
+	priv, pub, err := ta.GenerateKeyPair()
+	require.NoError(t, err)
+
+	key, cert, err := tlsca.GenerateSelfSignedCA(pkix.Name{CommonName: domain}, nil, time.Hour)
+	require.NoError(t, err)
+
+	ca, err := types.NewCertAuthority(types.CertAuthoritySpecV2{
+		Type:        caType,
+		ClusterName: domain,
+		ActiveKeys: types.CAKeySet{
+			SSH: []*types.SSHKeyPair{{
+				PrivateKey:     priv,
+				PrivateKeyType: types.PrivateKeyType_RAW,
+				PublicKey:      pub,
+			}},
+			TLS: []*types.TLSKeyPair{{
+				Cert: cert,
+				Key:  key,
+			}},
+			JWT: []*types.JWTKeyPair{{
+				PublicKey:      pub,
+				PrivateKey:     priv,
+				PrivateKeyType: types.PrivateKeyType_RAW,
+			}},
+		},
+	})
+	require.NoError(t, err)
+
+	return ca
 }
diff --git a/lib/services/trust.go b/lib/services/trust.go
index c7cbfe0229bce..63775ae5b52bb 100644
--- a/lib/services/trust.go
+++ b/lib/services/trust.go
@@ -83,6 +83,26 @@ type Trust interface {
 // auth server for some local operations.
 type TrustInternal interface {
 	Trust
+
+	// CreateTrustedCluster atomically creates a new trusted cluster along with associated resources.
+	CreateTrustedCluster(context.Context, types.TrustedCluster, []types.CertAuthority) (revision string, err error)
+
+	// UpdateTrustedCluster atomically updates a trusted cluster along with associated resources.
+	UpdateTrustedCluster(context.Context, types.TrustedCluster, []types.CertAuthority) (revision string, err error)
+
+	// DeleteTrustedClusterInternal atomically deletes a trusted cluster along with associated resources.
+	DeleteTrustedClusterInternal(context.Context, string, []types.CertAuthID) error
+
+	// CreateRemoteCluster atomically creates a new remote cluster along with associated resources.
+	CreateRemoteClusterInternal(context.Context, types.RemoteCluster, []types.CertAuthority) (revision string, err error)
+
+	// DeleteRemoteClusterInternal atomically deletes a remote cluster along with associated resources.
+	DeleteRemoteClusterInternal(context.Context, string, []types.CertAuthID) error
+
+	// GetInactiveCertAuthority returns inactive certificate authority by given id. Parameter loadSigningKeys
+	// controls if signing keys are loaded.
+	GetInactiveCertAuthority(ctx context.Context, id types.CertAuthID, loadSigningKeys bool) (types.CertAuthority, error)
+
 	// CreateCertAuthorities creates multiple cert authorities atomically.
 	CreateCertAuthorities(context.Context, ...types.CertAuthority) (revision string, err error)
 

From 768a0bd6848b0b1ef0677fc40e4995f128ee41e6 Mon Sep 17 00:00:00 2001
From: Tiago Silva <tiago.silva@goteleport.com>
Date: Tue, 29 Oct 2024 17:51:58 +0000
Subject: [PATCH 09/13] [entraid] add setup script for offline clusters.
 (#47863)

* [entraid] add setup script for offline clusters.

This PR adds a cli configuration for Entra ID where it's possible to default to system credentials instead of relying on OIDC for authentication in EntraID. OIDC is not always a possibility, especially when the cluster is private and not internet accessible.

The UX is the following:

```text

Step 1: Run the Setup Script

1. Open **Azure Cloud Shell** (Bash) using **Google Chrome** or **Safari** for the best compatibility.
2. Upload the setup script using the **Upload** button in the Cloud Shell toolbar.
3. Once uploaded, execute the script by running the following command:
   $ bash entraid.sh

**Important Considerations**:
- You must have **Azure privileged administrator permissions** to complete the integration.
- Ensure you're using the **Bash** environment in Cloud Shell.
- During the script execution, you'll be prompted to run 'az login' to authenticate with Azure. **Teleport** does not store or persist your credentials.
- **Mozilla Firefox** users may experience connectivity issues in Azure Cloud Shell; using Chrome or Safari is recommended.

Once the script completes, type 'continue' to proceed, 'exit' to quit: continue

Step 2: Input Tenant ID and Client ID

With the output of Step 1, please copy and paste the following information:
Enter the Tenant ID: 1056b571-0390-4b08-86c8-2edba8d9ae79
Enter the Client ID: 1056b571-0390-4b08-86c8-2edba8d9ae79

Successfully created EntraID plugin "name".
```

Signed-off-by: Tiago Silva <tiago.silva@goteleport.com>

* move function to api

* handle code review comments

* Apply suggestions from code review

Co-authored-by: Marco Dinis <marco.dinis@goteleport.com>

* fix url

* enable group claims

* add godoc

* handle code review comments

* fix gomod

---------

Signed-off-by: Tiago Silva <tiago.silva@goteleport.com>
Co-authored-by: Marco Dinis <marco.dinis@goteleport.com>
---
 api/utils/entraid/federation_metadata.go      |  33 ++
 go.mod                                        |   2 +-
 lib/config/configuration.go                   |   3 +
 lib/integrations/azureoidc/enterprise_app.go  |  10 +-
 lib/integrations/azureoidc/provision_sso.go   |   3 +
 lib/msgraph/models.go                         |  14 +-
 tool/tctl/common/plugin/entraid.go            | 419 ++++++++++++++++++
 tool/tctl/common/plugin/plugins_command.go    |  20 +-
 .../common/plugin/plugins_command_test.go     |  31 ++
 tool/teleport/common/integration_configure.go |   2 +-
 tool/teleport/common/teleport.go              |   1 +
 11 files changed, 526 insertions(+), 12 deletions(-)
 create mode 100644 api/utils/entraid/federation_metadata.go
 create mode 100644 tool/tctl/common/plugin/entraid.go

diff --git a/api/utils/entraid/federation_metadata.go b/api/utils/entraid/federation_metadata.go
new file mode 100644
index 0000000000000..2dfa76080cdeb
--- /dev/null
+++ b/api/utils/entraid/federation_metadata.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2024 Gravitational, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package entraid
+
+import (
+	"net/url"
+	"path"
+)
+
+// FederationMetadataURL returns the URL for the federation metadata endpoint
+func FederationMetadataURL(tenantID, appID string) string {
+	return (&url.URL{
+		Scheme: "https",
+		Host:   "login.microsoftonline.com",
+		Path:   path.Join(tenantID, "federationmetadata", "2007-06", "federationmetadata.xml"),
+		RawQuery: url.Values{
+			"appid": {appID},
+		}.Encode(),
+	}).String()
+}
diff --git a/go.mod b/go.mod
index 0775645a53dd4..b87b5936b42fa 100644
--- a/go.mod
+++ b/go.mod
@@ -88,6 +88,7 @@ require (
 	github.com/elimity-com/scim v0.0.0-20240320110924-172bf2aee9c8
 	github.com/envoyproxy/go-control-plane v0.13.0
 	github.com/evanphx/json-patch v5.9.0+incompatible
+	github.com/fatih/color v1.17.0
 	github.com/fsnotify/fsnotify v1.7.0
 	github.com/fsouza/fake-gcs-server v1.49.3
 	github.com/fxamacker/cbor/v2 v2.7.0
@@ -320,7 +321,6 @@ require (
 	github.com/evanphx/json-patch/v5 v5.9.0 // indirect
 	github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect
 	github.com/fatih/camelcase v1.0.0 // indirect
-	github.com/fatih/color v1.17.0 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
 	github.com/gabriel-vasile/mimetype v1.4.3 // indirect
 	github.com/go-asn1-ber/asn1-ber v1.5.5 // indirect
diff --git a/lib/config/configuration.go b/lib/config/configuration.go
index 5ef37af409f81..87c41b8986b6f 100644
--- a/lib/config/configuration.go
+++ b/lib/config/configuration.go
@@ -297,6 +297,9 @@ type IntegrationConfAzureOIDC struct {
 	// When this is true, the integration script will produce
 	// a cache file necessary for TAG synchronization.
 	AccessGraphEnabled bool
+
+	// SkipOIDCConfiguration is a flag indicating that OIDC configuration should be skipped.
+	SkipOIDCConfiguration bool
 }
 
 // IntegrationConfDeployServiceIAM contains the arguments of
diff --git a/lib/integrations/azureoidc/enterprise_app.go b/lib/integrations/azureoidc/enterprise_app.go
index e159470d0bb39..e7de09225ec58 100644
--- a/lib/integrations/azureoidc/enterprise_app.go
+++ b/lib/integrations/azureoidc/enterprise_app.go
@@ -52,7 +52,7 @@ var appRoles = []string{
 //   - Provides Teleport with OIDC authentication to Azure
 //   - Is given the permissions to access certain Microsoft Graph API endpoints for this tenant.
 //   - Provides SSO to the Teleport cluster via SAML.
-func SetupEnterpriseApp(ctx context.Context, proxyPublicAddr string, authConnectorName string) (string, string, error) {
+func SetupEnterpriseApp(ctx context.Context, proxyPublicAddr string, authConnectorName string, skipOIDCSetup bool) (string, string, error) {
 	var appID, tenantID string
 
 	tenantID, err := getTenantID()
@@ -120,8 +120,12 @@ func SetupEnterpriseApp(ctx context.Context, proxyPublicAddr string, authConnect
 		}
 	}
 
-	if err := createFederatedAuthCredential(ctx, graphClient, *app.ID, proxyPublicAddr); err != nil {
-		return appID, tenantID, trace.Wrap(err, "failed to create an OIDC federated auth credential")
+	// Skip OIDC setup if requested.
+	// This is useful for clusters that can't use OIDC because they are not reachable from the public internet.
+	if !skipOIDCSetup {
+		if err := createFederatedAuthCredential(ctx, graphClient, *app.ID, proxyPublicAddr); err != nil {
+			return appID, tenantID, trace.Wrap(err, "failed to create an OIDC federated auth credential")
+		}
 	}
 
 	acsURL, err := url.Parse(proxyPublicAddr)
diff --git a/lib/integrations/azureoidc/provision_sso.go b/lib/integrations/azureoidc/provision_sso.go
index 07d4366040752..9bb17aa5771dd 100644
--- a/lib/integrations/azureoidc/provision_sso.go
+++ b/lib/integrations/azureoidc/provision_sso.go
@@ -48,6 +48,9 @@ func setupSSO(ctx context.Context, graphClient *msgraph.Client, appObjectID stri
 	webApp := &msgraph.WebApplication{}
 	webApp.RedirectURIs = &uris
 	app.Web = webApp
+	securityGroups := new(string)
+	*securityGroups = "SecurityGroup"
+	app.GroupMembershipClaims = securityGroups
 
 	err = graphClient.UpdateApplication(ctx, appObjectID, app)
 
diff --git a/lib/msgraph/models.go b/lib/msgraph/models.go
index f867ecbb634c5..829d55a040464 100644
--- a/lib/msgraph/models.go
+++ b/lib/msgraph/models.go
@@ -18,6 +18,7 @@ package msgraph
 
 import (
 	"encoding/json"
+	"slices"
 
 	"github.com/gravitational/trace"
 )
@@ -34,6 +35,12 @@ type DirectoryObject struct {
 
 type Group struct {
 	DirectoryObject
+	GroupTypes []string `json:"groupTypes,omitempty"`
+}
+
+// IsOffice365Group reports whether the group is a Microsoft 365 ("Unified")
+// group, as indicated by the "Unified" value in its groupTypes attribute.
+func (g *Group) IsOffice365Group() bool {
+	const office365Group = "Unified"
+	return slices.Contains(g.GroupTypes, office365Group)
+}
 
 func (g *Group) isGroupMember() {}
@@ -53,9 +60,10 @@ func (u *User) GetID() *string { return u.ID }
 type Application struct {
 	DirectoryObject
 
-	AppID          *string         `json:"appId,omitempty"`
-	IdentifierURIs *[]string       `json:"identifierUris,omitempty"`
-	Web            *WebApplication `json:"web,omitempty"`
+	AppID                 *string         `json:"appId,omitempty"`
+	IdentifierURIs        *[]string       `json:"identifierUris,omitempty"`
+	Web                   *WebApplication `json:"web,omitempty"`
+	GroupMembershipClaims *string         `json:"groupMembershipClaims,omitempty"`
 }
 
 type WebApplication struct {
diff --git a/tool/tctl/common/plugin/entraid.go b/tool/tctl/common/plugin/entraid.go
new file mode 100644
index 0000000000000..ea5010504ca9f
--- /dev/null
+++ b/tool/tctl/common/plugin/entraid.go
@@ -0,0 +1,419 @@
+/*
+ * Teleport
+ * Copyright (C) 2024  Gravitational, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+package plugin
+
+import (
+	"bufio"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/alecthomas/kingpin/v2"
+	"github.com/fatih/color"
+	"github.com/google/safetext/shsprintf"
+	"github.com/google/uuid"
+	"github.com/gravitational/trace"
+
+	pluginspb "github.com/gravitational/teleport/api/gen/proto/go/teleport/plugins/v1"
+	"github.com/gravitational/teleport/api/types"
+	entraapiutils "github.com/gravitational/teleport/api/utils/entraid"
+	"github.com/gravitational/teleport/lib/integrations/azureoidc"
+	"github.com/gravitational/teleport/lib/utils/oidc"
+	"github.com/gravitational/teleport/lib/web/scripts/oneoff"
+)
+
+var (
+	// bold and boldRed render emphasized (and red) terminal text for the
+	// interactive setup guide output.
+	bold    = color.New(color.Bold).SprintFunc()
+	boldRed = color.New(color.Bold, color.FgRed).SprintFunc()
+
+	// step1Template is printed before the user runs the generated setup
+	// script in Azure Cloud Shell. It contains two %s verbs: the full path
+	// of the generated script, then its base name.
+	step1Template = bold("Step 1: Run the Setup Script") + `
+
+1. Open ` + bold("Azure Cloud Shell") + ` (Bash) [https://portal.azure.com/#cloudshell/] using ` + bold("Google Chrome") + ` or ` + bold("Safari") + ` for the best compatibility.
+2. Upload the setup script in ` + boldRed("%s") + ` using the ` + bold("Upload") + ` button in the Cloud Shell toolbar.
+3. Once uploaded, execute the script by running the following command:
+   $ bash %s
+
+` + bold("Important Considerations") + `:
+- You must have ` + bold("Azure privileged administrator permissions") + ` to complete the integration.
+- Ensure you're using the ` + bold("Bash") + ` environment in Cloud Shell.
+- During the script execution, you'll be prompted to run 'az login' to authenticate with Azure. ` + bold("Teleport") + ` does not store or persist your credentials.
+- ` + bold("Mozilla Firefox") + ` users may experience connectivity issues in Azure Cloud Shell; using Chrome or Safari is recommended.
+
+To rerun the script, type 'exit' to close and then restart the process.
+
+`
+
+	// step2Template is printed before prompting for the Tenant ID and
+	// Client ID values produced by the setup script. It takes no verbs.
+	step2Template = `
+	
+` + bold("Step 2: Input Tenant ID and Client ID") + `
+
+With the output of Step 1, please copy and paste the following information:
+`
+)
+
+// entraArgs holds the parsed CLI flags for the
+// `tctl plugins install entraid` subcommand.
+type entraArgs struct {
+	// cmd is the kingpin command handle, used to match the executed command.
+	cmd                  *kingpin.CmdClause
+	// authConnectorName is the name of the SAML connector resource to create.
+	authConnectorName    string
+	// defaultOwners are the Teleport users set as default owners of the
+	// access lists imported from Entra ID.
+	defaultOwners        []string
+	// useSystemCredentials uses Azure system credentials instead of OIDC.
+	useSystemCredentials bool
+	// accessGraph enables building the Access Graph cache.
+	accessGraph          bool
+	// force proceeds with installation even if the plugin already exists.
+	force                bool
+}
+
+// initInstallEntra registers the `entraid` subcommand under
+// `tctl plugins install` and binds its flags to p.install / p.install.entraID.
+func (p *PluginsCommand) initInstallEntra(parent *kingpin.CmdClause) {
+	p.install.entraID.cmd = parent.Command("entraid", "Install an EntraId integration.")
+	cmd := p.install.entraID.cmd
+	cmd.
+		Flag("name", "Name of the plugin resource to create").
+		Default("entra-id").
+		StringVar(&p.install.name)
+
+	cmd.
+		Flag("auth-connector-name", "Name of the SAML connector resource to create").
+		Default("entra-id-default").
+		StringVar(&p.install.entraID.authConnectorName)
+
+	cmd.
+		Flag("use-system-credentials", "Uses system credentials instead of OIDC.").
+		BoolVar(&p.install.entraID.useSystemCredentials)
+
+	// At least one default owner is required for the imported access lists.
+	cmd.Flag("default-owner", "List of Teleport users that are default owners for the imported access lists. Multiple flags allowed.").
+		Required().
+		StringsVar(&p.install.entraID.defaultOwners)
+
+	// Access Graph cache building is on by default and must be opted out of.
+	cmd.
+		Flag("access-graph", "Enables Access Graph cache build.").
+		Default("true").
+		BoolVar(&p.install.entraID.accessGraph)
+
+	cmd.
+		Flag("force", "Proceed with installation even if plugin already exists.").
+		Short('f').
+		Default("false").
+		BoolVar(&p.install.entraID.force)
+}
+
+// entraSettings holds the values collected from the user during the
+// interactive Entra ID setup guide.
+type entraSettings struct {
+	// accessGraphCache is the parsed TAG cache file produced by the setup
+	// script; nil unless the Access Graph option is enabled.
+	accessGraphCache *azureoidc.TAGInfoCache
+	// clientID is the application (client) ID entered by the user.
+	clientID         string
+	// tenantID is the Entra ID tenant ID entered by the user.
+	tenantID         string
+}
+
+var (
+	// errCancel is returned by the setup guide when the user chooses to
+	// exit instead of continuing; callers treat it as a no-op, not a failure.
+	errCancel = trace.BadParameter("operation canceled")
+)
+
+// entraSetupGuide runs the interactive portion of the Entra ID setup: it
+// writes the generated setup script to a temporary file, walks the user
+// through executing it in Azure Cloud Shell (step 1), then collects the
+// resulting Tenant ID, Client ID and, if the Access Graph option is enabled,
+// the TAG cache file location (step 2).
+//
+// It returns errCancel when the user types 'exit' at the first prompt.
+func (p *PluginsCommand) entraSetupGuide(proxyPublicAddr string) (entraSettings, error) {
+	// The script is created in the current working directory rather than the
+	// system temp dir — presumably so the user can easily locate it for the
+	// Cloud Shell upload step (TODO confirm).
+	pwd, err := os.Getwd()
+	if err != nil {
+		return entraSettings{}, trace.Wrap(err, "failed to get working dir")
+	}
+	f, err := os.CreateTemp(pwd, "entraid-setup-*.sh")
+	if err != nil {
+		return entraSettings{}, trace.Wrap(err, "failed to create temp file")
+	}
+
+	// The script is only needed while the user uploads it; remove it when
+	// the guide finishes.
+	defer os.Remove(f.Name())
+
+	buildScript, err := buildScript(proxyPublicAddr, p.install.entraID)
+	if err != nil {
+		return entraSettings{}, trace.Wrap(err, "failed to build script")
+	}
+
+	if _, err := f.Write([]byte(buildScript)); err != nil {
+		return entraSettings{}, trace.Wrap(err, "failed to write script to file")
+	}
+
+	if err := f.Close(); err != nil {
+		return entraSettings{}, trace.Wrap(err, "failed to close file")
+	}
+	fileLoc := f.Name()
+
+	fmt.Fprintf(os.Stdout, step1Template, fileLoc, filepath.Base(fileLoc))
+
+	// Block until the user confirms the script has completed (or aborts).
+	op, err := readData(os.Stdin, os.Stdout,
+		`Once the script completes, type 'continue' to proceed, 'exit' to quit`,
+		func(input string) bool {
+			return input == "continue" || input == "exit"
+		}, "Invalid input. Please enter 'continue' or 'exit'.")
+	if err != nil {
+		return entraSettings{}, trace.Wrap(err, "failed to read operation")
+	}
+	if op == "exit" { // User chose to exit
+		return entraSettings{}, errCancel
+	}
+
+	// Both the Tenant ID and Client ID must be well-formed UUIDs.
+	validUUID := func(input string) bool {
+		_, err := uuid.Parse(input)
+		return err == nil
+	}
+
+	fmt.Fprint(os.Stdout, step2Template)
+
+	var settings entraSettings
+	settings.tenantID, err = readData(os.Stdin, os.Stdout, "Enter the Tenant ID", validUUID, "Invalid Tenant ID")
+	if err != nil {
+		return settings, trace.Wrap(err, "failed to read Tenant ID")
+	}
+
+	settings.clientID, err = readData(os.Stdin, os.Stdout, "Enter the Client ID", validUUID, "Invalid Client ID")
+	if err != nil {
+		return settings, trace.Wrap(err, "failed to read Client ID")
+	}
+
+	if p.install.entraID.accessGraph {
+		// dataValidator parses the cache file as a side effect of
+		// validation, storing the result on settings. Note it assigns to
+		// the err captured from the enclosing scope.
+		dataValidator := func(input string) bool {
+			settings.accessGraphCache, err = readTAGCache(input)
+			return err == nil
+		}
+		_, err = readData(os.Stdin, os.Stdout, "Enter the Access Graph Cache file location", dataValidator, "File does not exist or is invalid")
+		if err != nil {
+			return settings, trace.Wrap(err, "failed to read Access Graph Cache file")
+		}
+	}
+	return settings, nil
+}
+
+// InstallEntra is the entry point for the `tctl plugins install entraid` command.
+// This function guides users through an interactive setup process to configure EntraID integration,
+// directing them to execute a script in Azure Cloud Shell and provide the required configuration inputs.
+// The script creates an Azure EntraID Enterprise Application, enabling SAML logins in Teleport with
+// the following claims:
+// - givenname: user.givenname
+// - surname: user.surname
+// - emailaddress: user.mail
+// - name: user.userprincipalname
+// - groups: user.groups
+// Additionally, the script establishes a Trust Policy in the application to allow Teleport
+// to be recognized as a credential issuer when system credentials are not used.
+// If system credentials are present, the script will skip the Trust policy creation using
+// system credentials for EntraID authentication.
+// Finally, if no system credentials are in use, the script will set up an Azure OIDC integration
+// in Teleport and a Teleport plugin to synchronize access lists from EntraID to Teleport.
+//
+// With --force, existing SAML connector, integration, and plugin resources of
+// the same name are overwritten instead of producing an AlreadyExists error.
+func (p *PluginsCommand) InstallEntra(ctx context.Context, args installPluginArgs) error {
+	inputs := p.install
+
+	proxyPublicAddr, err := getProxyPublicAddr(ctx, args.authClient)
+	if err != nil {
+		return trace.Wrap(err)
+	}
+
+	settings, err := p.entraSetupGuide(proxyPublicAddr)
+	if err != nil {
+		if errors.Is(err, errCancel) {
+			// The user chose to abort the guided setup; not a failure.
+			return nil
+		}
+		return trace.Wrap(err)
+	}
+
+	var tagSyncSettings *types.PluginEntraIDAccessGraphSettings
+	if settings.accessGraphCache != nil {
+		tagSyncSettings = &types.PluginEntraIDAccessGraphSettings{
+			AppSsoSettingsCache: settings.accessGraphCache.AppSsoSettingsCache,
+		}
+	}
+
+	saml, err := types.NewSAMLConnector(inputs.entraID.authConnectorName, types.SAMLConnectorSpecV2{
+		AssertionConsumerService: strings.TrimRight(proxyPublicAddr, "/") + "/v1/webapi/saml/acs/" + inputs.entraID.authConnectorName,
+		AllowIDPInitiated:        true,
+		// AttributesToRoles is required, but Entra ID does not have a default group (like Okta's "Everyone"),
+		// so we add a dummy claim that will always be fulfilled and map them to the "requester" role.
+		AttributesToRoles: []types.AttributeMapping{
+			{
+				Name:  "http://schemas.microsoft.com/ws/2008/06/identity/claims/groups",
+				Value: "*",
+				Roles: []string{"requester"},
+			},
+		},
+		Display:             "Entra ID",
+		EntityDescriptorURL: entraapiutils.FederationMetadataURL(settings.tenantID, settings.clientID),
+	})
+	if err != nil {
+		return trace.Wrap(err, "failed to create SAML connector")
+	}
+
+	if _, err = args.authClient.CreateSAMLConnector(ctx, saml); err != nil {
+		if !trace.IsAlreadyExists(err) || !inputs.entraID.force {
+			return trace.Wrap(err, "failed to create SAML connector")
+		}
+		// The connector already exists and --force was given: overwrite it.
+		if _, err = args.authClient.UpsertSAMLConnector(ctx, saml); err != nil {
+			return trace.Wrap(err, "failed to upsert SAML connector")
+		}
+	}
+
+	if !inputs.entraID.useSystemCredentials {
+		integrationSpec, err := types.NewIntegrationAzureOIDC(
+			types.Metadata{Name: inputs.name},
+			&types.AzureOIDCIntegrationSpecV1{
+				TenantID: settings.tenantID,
+				ClientID: settings.clientID,
+			},
+		)
+		if err != nil {
+			return trace.Wrap(err, "failed to create Azure OIDC integration")
+		}
+
+		if _, err = args.authClient.CreateIntegration(ctx, integrationSpec); err != nil {
+			if !trace.IsAlreadyExists(err) || !inputs.entraID.force {
+				return trace.Wrap(err, "failed to create Azure OIDC integration")
+			}
+
+			// The integration already exists and --force was given: fetch
+			// the current resource and overwrite its spec.
+			integration, err := args.authClient.GetIntegration(ctx, integrationSpec.GetName())
+			if err != nil {
+				return trace.Wrap(err, "failed to get Azure OIDC integration")
+			}
+			// Copy the Azure OIDC spec (not the AWS one — this is an Azure
+			// integration; the AWS spec would be nil and the new tenant and
+			// client IDs would be silently dropped).
+			integration.SetAzureOIDCIntegrationSpec(integrationSpec.GetAzureOIDCIntegrationSpec())
+			if _, err = args.authClient.UpdateIntegration(ctx, integration); err != nil {
+				return trace.Wrap(err, "failed to update Azure OIDC integration")
+			}
+		}
+	}
+
+	credentialsSource := types.EntraIDCredentialsSource_ENTRAID_CREDENTIALS_SOURCE_OIDC
+	if inputs.entraID.useSystemCredentials {
+		credentialsSource = types.EntraIDCredentialsSource_ENTRAID_CREDENTIALS_SOURCE_SYSTEM_CREDENTIALS
+	}
+	req := &pluginspb.CreatePluginRequest{
+		Plugin: &types.PluginV1{
+			Metadata: types.Metadata{
+				Name: inputs.name,
+				Labels: map[string]string{
+					"teleport.dev/hosted-plugin": "true",
+				},
+			},
+			Spec: types.PluginSpecV1{
+				Settings: &types.PluginSpecV1_EntraId{
+					EntraId: &types.PluginEntraIDSettings{
+						SyncSettings: &types.PluginEntraIDSyncSettings{
+							DefaultOwners:     inputs.entraID.defaultOwners,
+							SsoConnectorId:    inputs.entraID.authConnectorName,
+							CredentialsSource: credentialsSource,
+							TenantId:          settings.tenantID,
+						},
+						AccessGraphSettings: tagSyncSettings,
+					},
+				},
+			},
+		},
+	}
+
+	_, err = args.plugins.CreatePlugin(ctx, req)
+	if err != nil {
+		if !trace.IsAlreadyExists(err) || !inputs.entraID.force {
+			return trace.Wrap(err)
+		}
+		// The plugin already exists and --force was given: update it in
+		// place, carrying over the stored revision so the update is not
+		// rejected as stale.
+		plugin := req.GetPlugin()
+		{
+			oldPlugin, err := args.plugins.GetPlugin(ctx, &pluginspb.GetPluginRequest{
+				Name: inputs.name,
+			})
+			if err != nil {
+				return trace.Wrap(err)
+			}
+			plugin.Metadata.Revision = oldPlugin.GetMetadata().Revision
+		}
+		if _, err = args.plugins.UpdatePlugin(ctx, &pluginspb.UpdatePluginRequest{
+			Plugin: plugin,
+		}); err != nil {
+			return trace.Wrap(err)
+		}
+	}
+
+	fmt.Printf("Successfully created EntraID plugin %q\n\n", p.install.name)
+
+	return nil
+}
+
+// buildScript generates the one-off bash script the user uploads to Azure
+// Cloud Shell. The script runs `teleport integration configure azure-oidc`
+// with flags derived from the CLI arguments; all interpolated values are
+// shell-escaped via shsprintf to avoid command injection.
+func buildScript(proxyPublicAddr string, entraCfg entraArgs) (string, error) {
+	// The script must execute the following command:
+	argsList := []string{
+		"integration", "configure", "azure-oidc",
+		fmt.Sprintf("--proxy-public-addr=%s", shsprintf.EscapeDefaultContext(proxyPublicAddr)),
+		fmt.Sprintf("--auth-connector-name=%s", shsprintf.EscapeDefaultContext(entraCfg.authConnectorName)),
+	}
+
+	if entraCfg.accessGraph {
+		argsList = append(argsList, "--access-graph")
+	}
+
+	// When system credentials are used, the OIDC trust setup on the Azure
+	// side is unnecessary and is skipped.
+	if entraCfg.useSystemCredentials {
+		argsList = append(argsList, "--skip-oidc-integration")
+	}
+
+	script, err := oneoff.BuildScript(oneoff.OneOffScriptParams{
+		TeleportArgs:   strings.Join(argsList, " "),
+		SuccessMessage: "Success! You can now go back to the Teleport Web UI to use the integration with Azure.",
+	})
+	if err != nil {
+		return "", trace.Wrap(err)
+	}
+	return script, nil
+}
+
+// getProxyPublicAddr pings the cluster through the auth client and converts
+// the advertised proxy public address into issuer-URL form.
+// NOTE(review): oidc.IssuerFromPublicAddress appears to normalize the address
+// into an https:// issuer URL — confirm against that helper's documentation.
+func getProxyPublicAddr(ctx context.Context, authClient authClient) (string, error) {
+	pingResp, err := authClient.Ping(ctx)
+	if err != nil {
+		return "", trace.Wrap(err, "failed fetching cluster info")
+	}
+	proxyPublicAddr := pingResp.GetProxyPublicAddr()
+	oidcIssuer, err := oidc.IssuerFromPublicAddress(proxyPublicAddr, "")
+	return oidcIssuer, trace.Wrap(err)
+}
+
+// readTAGCache reads the TAG cache file and returns the TAGInfoCache object.
+// azureoidc.TAGInfoCache is a struct that contains the information necessary for Access Graph to analyze Azure SSO.
+// It contains a list of AppID and their corresponding FederatedSsoV2 information.
+//
+// An error is returned if fileLoc is empty, the file cannot be opened, or its
+// contents do not decode as JSON into the expected structure.
+func readTAGCache(fileLoc string) (*azureoidc.TAGInfoCache, error) {
+	if fileLoc == "" {
+		return nil, trace.BadParameter("no TAG cache file specified")
+	}
+
+	file, err := os.Open(fileLoc)
+	if err != nil {
+		return nil, trace.Wrap(err)
+	}
+	defer file.Close()
+
+	var result azureoidc.TAGInfoCache
+	if err := json.NewDecoder(file).Decode(&result); err != nil {
+		return nil, trace.Wrap(err)
+	}
+
+	return &result, nil
+}
+
+// readData prompts via w with message, reads one line from r, and re-prompts
+// until validate accepts the input, printing errorMessage after each rejected
+// attempt. The accepted (trimmed) input is returned.
+//
+// If the reader is exhausted (for example, stdin hits EOF) and the final,
+// possibly unterminated, line does not validate, the read error is returned
+// instead of looping forever on the same prompt.
+func readData(r io.Reader, w io.Writer, message string, validate func(string) bool, errorMessage string) (string, error) {
+	reader := bufio.NewReader(r)
+	for {
+		fmt.Fprintf(w, "%s: ", message)
+		// ReadString returns whatever was read before an error alongside the
+		// error itself, so a last line without a trailing newline is still
+		// validated below.
+		input, err := reader.ReadString('\n')
+		input = strings.TrimSpace(input) // Clean up any extra newlines or spaces
+		if validate(input) {
+			return input, nil
+		}
+		if err != nil {
+			// No more input will ever arrive; bail out rather than spinning
+			// on the error message indefinitely.
+			return "", trace.Wrap(err, "failed to read input")
+		}
+		fmt.Fprintf(w, "%s\n", errorMessage)
+	}
+}
diff --git a/tool/tctl/common/plugin/plugins_command.go b/tool/tctl/common/plugin/plugins_command.go
index ba6c92f7ae5a9..df8b9eeb4ed3b 100644
--- a/tool/tctl/common/plugin/plugins_command.go
+++ b/tool/tctl/common/plugin/plugins_command.go
@@ -49,10 +49,11 @@ func logErrorMessage(err error) slog.Attr {
 }
 
 type pluginInstallArgs struct {
-	cmd  *kingpin.CmdClause
-	name string
-	okta oktaArgs
-	scim scimArgs
+	cmd     *kingpin.CmdClause
+	name    string
+	okta    oktaArgs
+	scim    scimArgs
+	entraID entraArgs
 }
 
 type scimArgs struct {
@@ -98,6 +99,7 @@ func (p *PluginsCommand) initInstall(parent *kingpin.CmdClause, config *servicec
 
 	p.initInstallOkta(p.install.cmd)
 	p.initInstallSCIM(p.install.cmd)
+	p.initInstallEntra(p.install.cmd)
 }
 
 func (p *PluginsCommand) initInstallSCIM(parent *kingpin.CmdClause) {
@@ -200,11 +202,18 @@ func (p *PluginsCommand) Cleanup(ctx context.Context, clusterAPI *authclient.Cli
 
 type authClient interface {
 	GetSAMLConnector(ctx context.Context, id string, withSecrets bool) (types.SAMLConnector, error)
+	CreateSAMLConnector(ctx context.Context, connector types.SAMLConnector) (types.SAMLConnector, error)
+	UpsertSAMLConnector(ctx context.Context, connector types.SAMLConnector) (types.SAMLConnector, error)
+	CreateIntegration(ctx context.Context, ig types.Integration) (types.Integration, error)
+	GetIntegration(ctx context.Context, name string) (types.Integration, error)
+	UpdateIntegration(ctx context.Context, ig types.Integration) (types.Integration, error)
 	Ping(ctx context.Context) (proto.PingResponse, error)
 }
 
 type pluginsClient interface {
 	CreatePlugin(ctx context.Context, in *pluginsv1.CreatePluginRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+	GetPlugin(ctx context.Context, in *pluginsv1.GetPluginRequest, opts ...grpc.CallOption) (*types.PluginV1, error)
+	UpdatePlugin(ctx context.Context, in *pluginsv1.UpdatePluginRequest, opts ...grpc.CallOption) (*types.PluginV1, error)
 }
 
 type installPluginArgs struct {
@@ -310,6 +319,9 @@ func (p *PluginsCommand) TryRun(ctx context.Context, cmd string, client *authcli
 		err = p.InstallOkta(ctx, args)
 	case p.install.scim.cmd.FullCommand():
 		err = p.InstallSCIM(ctx, client)
+	case p.install.entraID.cmd.FullCommand():
+		args := installPluginArgs{authClient: client, plugins: client.PluginsClient()}
+		err = p.InstallEntra(ctx, args)
 	case p.delete.cmd.FullCommand():
 		err = p.Delete(ctx, client)
 	default:
diff --git a/tool/tctl/common/plugin/plugins_command_test.go b/tool/tctl/common/plugin/plugins_command_test.go
index e42f21e26310f..9033311f3272c 100644
--- a/tool/tctl/common/plugin/plugins_command_test.go
+++ b/tool/tctl/common/plugin/plugins_command_test.go
@@ -449,6 +449,16 @@ func (m *mockPluginsClient) CreatePlugin(ctx context.Context, in *pluginsv1.Crea
 	return result.Get(0).(*emptypb.Empty), result.Error(1)
 }
 
+// GetPlugin implements pluginsClient for tests by delegating to testify's mock.
+func (m *mockPluginsClient) GetPlugin(ctx context.Context, in *pluginsv1.GetPluginRequest, opts ...grpc.CallOption) (*types.PluginV1, error) {
+	result := m.Called(ctx, in, opts)
+	return result.Get(0).(*types.PluginV1), result.Error(1)
+}
+
+// UpdatePlugin implements pluginsClient for tests by delegating to testify's mock.
+func (m *mockPluginsClient) UpdatePlugin(ctx context.Context, in *pluginsv1.UpdatePluginRequest, opts ...grpc.CallOption) (*types.PluginV1, error) {
+	result := m.Called(ctx, in, opts)
+	return result.Get(0).(*types.PluginV1), result.Error(1)
+}
+
 type mockAuthClient struct {
 	mock.Mock
 }
@@ -457,6 +467,27 @@ func (m *mockAuthClient) GetSAMLConnector(ctx context.Context, id string, withSe
 	result := m.Called(ctx, id, withSecrets)
 	return result.Get(0).(types.SAMLConnector), result.Error(1)
 }
+// CreateSAMLConnector implements authClient for tests via testify's mock.
+func (m *mockAuthClient) CreateSAMLConnector(ctx context.Context, connector types.SAMLConnector) (types.SAMLConnector, error) {
+	result := m.Called(ctx, connector)
+	return result.Get(0).(types.SAMLConnector), result.Error(1)
+}
+// UpsertSAMLConnector implements authClient for tests via testify's mock.
+func (m *mockAuthClient) UpsertSAMLConnector(ctx context.Context, connector types.SAMLConnector) (types.SAMLConnector, error) {
+	result := m.Called(ctx, connector)
+	return result.Get(0).(types.SAMLConnector), result.Error(1)
+}
+// CreateIntegration implements authClient for tests via testify's mock.
+func (m *mockAuthClient) CreateIntegration(ctx context.Context, ig types.Integration) (types.Integration, error) {
+	result := m.Called(ctx, ig)
+	return result.Get(0).(types.Integration), result.Error(1)
+}
+// UpdateIntegration implements authClient for tests via testify's mock.
+func (m *mockAuthClient) UpdateIntegration(ctx context.Context, ig types.Integration) (types.Integration, error) {
+	result := m.Called(ctx, ig)
+	return result.Get(0).(types.Integration), result.Error(1)
+}
+
+// GetIntegration implements authClient for tests via testify's mock.
+func (m *mockAuthClient) GetIntegration(ctx context.Context, name string) (types.Integration, error) {
+	result := m.Called(ctx, name)
+	return result.Get(0).(types.Integration), result.Error(1)
+}
 
 func (m *mockAuthClient) Ping(ctx context.Context) (proto.PingResponse, error) {
 	result := m.Called(ctx)
diff --git a/tool/teleport/common/integration_configure.go b/tool/teleport/common/integration_configure.go
index bfd762d1322ec..97f531910e45e 100644
--- a/tool/teleport/common/integration_configure.go
+++ b/tool/teleport/common/integration_configure.go
@@ -251,7 +251,7 @@ func onIntegrationConfAzureOIDCCmd(ctx context.Context, params config.Integratio
 
 	fmt.Println("Teleport is setting up the Azure integration. This may take a few minutes.")
 
-	appID, tenantID, err := azureoidc.SetupEnterpriseApp(ctx, params.ProxyPublicAddr, params.AuthConnectorName)
+	appID, tenantID, err := azureoidc.SetupEnterpriseApp(ctx, params.ProxyPublicAddr, params.AuthConnectorName, params.SkipOIDCConfiguration)
 	if err != nil {
 		return trace.Wrap(err)
 	}
diff --git a/tool/teleport/common/teleport.go b/tool/teleport/common/teleport.go
index 3ccaa6ad1928a..9cd4436c68680 100644
--- a/tool/teleport/common/teleport.go
+++ b/tool/teleport/common/teleport.go
@@ -552,6 +552,7 @@ func Run(options Options) (app *kingpin.Application, executedCommand string, con
 	integrationConfAzureOIDCCmd.Flag("proxy-public-addr", "The public address of Teleport Proxy Service").Required().StringVar(&ccf.IntegrationConfAzureOIDCArguments.ProxyPublicAddr)
 	integrationConfAzureOIDCCmd.Flag("auth-connector-name", "The name of Entra ID SAML Auth connector in Teleport.").Required().StringVar(&ccf.IntegrationConfAzureOIDCArguments.AuthConnectorName)
 	integrationConfAzureOIDCCmd.Flag("access-graph", "Enable Access Graph integration.").BoolVar(&ccf.IntegrationConfAzureOIDCArguments.AccessGraphEnabled)
+	integrationConfAzureOIDCCmd.Flag("skip-oidc-integration", "Skip OIDC integration.").BoolVar(&ccf.IntegrationConfAzureOIDCArguments.SkipOIDCConfiguration)
 
 	integrationConfSAMLIdP := integrationConfigureCmd.Command("samlidp", "Manage SAML IdP integrations.")
 	integrationSAMLIdPGCPWorkforce := integrationConfSAMLIdP.Command("gcp-workforce", "Configures GCP Workforce Identity Federation pool and SAML provider.")

From a91a831dff42ce0a037b98c0f2c236f17ce4776d Mon Sep 17 00:00:00 2001
From: Matt Smith <matt.smith@goteleport.com>
Date: Tue, 29 Oct 2024 14:04:30 -0400
Subject: [PATCH 10/13] [16.4.3] bump cloud docs (#48087)

---
 docs/config.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/config.json b/docs/config.json
index f955d894dbc16..0a63fa2737f71 100644
--- a/docs/config.json
+++ b/docs/config.json
@@ -132,7 +132,7 @@
       "aws_secret_access_key": "zyxw9876-this-is-an-example"
     },
     "cloud": {
-      "version": "16.4.2",
+      "version": "16.4.3",
       "major_version": "16",
       "sla": {
         "monthly_percentage": "99.9%",

From 045973e04fda0a24742353f96b4f964af2456de2 Mon Sep 17 00:00:00 2001
From: Paul Gottschling <paul.gottschling@goteleport.com>
Date: Tue, 29 Oct 2024 14:40:41 -0400
Subject: [PATCH 11/13] Require an introductory paragraph in docs pages
 (#43222)

Add a vale rule that requires there to be a paragraph between the second
frontmatter document separator (`---`) and the first H2-level heading of
a docs page. Introductory paragraphs are common omissions in docs pages,
but are important to help readers determine whether a guide is
appropriate for their use case.
---
 .../vale-styles/structure/intro-paragraph.yml | 34 +++++++++++++++++++
 1 file changed, 34 insertions(+)
 create mode 100644 docs/vale-styles/structure/intro-paragraph.yml

diff --git a/docs/vale-styles/structure/intro-paragraph.yml b/docs/vale-styles/structure/intro-paragraph.yml
new file mode 100644
index 0000000000000..3bee6806e4d66
--- /dev/null
+++ b/docs/vale-styles/structure/intro-paragraph.yml
@@ -0,0 +1,34 @@
+# This style enforces the presence of an introductory paragraph before the first
+# H2 of a docs page.
+extends: script
+level: error
+message: There must be a brief intro paragraph before the first H2-level section of a docs page. Use this to describe the purpose of the guide so a reader can determine whether they should continue reading. If the guide introduces a feature, describe the purpose and benefits of the feature. If there is already an "Introduction" H2 or similar, remove the heading.
+scope: raw
+script: |
+  text := import("text")
+  getMatches := func() {
+    docSeparators := text.re_find(`\n?---\n`, scope, 2)
+    // This is probably not a valid MDX file, but let other linters handle
+    // the error.
+    if docSeparators == undefined || len(docSeparators) != 2 {
+      return []
+    }
+
+    // Get the first H2 section
+    firstH2 := text.re_find(`\n## \w`, scope, 1)
+    if firstH2 == undefined {
+      return []
+    }
+
+    initialText := text.substr(scope, docSeparators[1][0].end,firstH2[0][0].begin)
+    // Check for at least one non-empty line before the first H2.
+    if !text.re_match(`\n[^\n]+\n`, initialText) {
+      return [{ 
+          begin: docSeparators[1][0].end,
+          end: firstH2[0][0].begin
+      }]
+    }
+
+  }
+
+  matches := getMatches()

From d75107272b860b995682f90e644fb7006b992a8d Mon Sep 17 00:00:00 2001
From: Noah Stride <noah.stride@goteleport.com>
Date: Tue, 29 Oct 2024 18:52:31 +0000
Subject: [PATCH 12/13] Fix "most, if not all," grammar (#48057)

---
 .../teleport/src/Discover/SelectResource/SelectResource.tsx     | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/web/packages/teleport/src/Discover/SelectResource/SelectResource.tsx b/web/packages/teleport/src/Discover/SelectResource/SelectResource.tsx
index 4af93b381df8b..cd6dbfda043cd 100644
--- a/web/packages/teleport/src/Discover/SelectResource/SelectResource.tsx
+++ b/web/packages/teleport/src/Discover/SelectResource/SelectResource.tsx
@@ -156,7 +156,7 @@ export function SelectResource({ onSelect }: SelectResourceProps) {
         <FeatureHeaderTitle>Select Resource To Add</FeatureHeaderTitle>
       </FeatureHeader>
       <HeaderSubtitle>
-        Teleport can integrate into most, if not all of your infrastructure.
+        Teleport can integrate into most, if not all, of your infrastructure.
         Search for what resource you want to add.
       </HeaderSubtitle>
       <Box height="90px" width="600px">

From 3220ffad778f84ef4392dabf77cce40e762fd376 Mon Sep 17 00:00:00 2001
From: Stephen Levine <stephen.levine@goteleport.com>
Date: Tue, 29 Oct 2024 15:03:31 -0400
Subject: [PATCH 13/13] [teleport-update] Use new webapi fields to find version
 (#47961)

* Adapt teleport-update to new webapi endpoints

* feedback
---
 api/client/webclient/webclient.go             | 31 +++++++++++++++---
 .../FIPS_and_Enterprise_flags.golden          | 10 ++++++
 lib/autoupdate/agent/updater.go               | 24 ++++++++------
 lib/autoupdate/agent/updater_test.go          | 32 +++++++++++++++++--
 4 files changed, 80 insertions(+), 17 deletions(-)
 create mode 100644 lib/autoupdate/agent/testdata/TestUpdater_Enable/FIPS_and_Enterprise_flags.golden

diff --git a/api/client/webclient/webclient.go b/api/client/webclient/webclient.go
index f3b6ba5586768..b5c684ebfb628 100644
--- a/api/client/webclient/webclient.go
+++ b/api/client/webclient/webclient.go
@@ -68,6 +68,9 @@ type Config struct {
 	Timeout time.Duration
 	// TraceProvider is used to retrieve a Tracer for creating spans
 	TraceProvider oteltrace.TracerProvider
+	// UpdateGroup is used to vary the webapi response based on the
+	// client's auto-update group.
+	UpdateGroup string
 }
 
 // CheckAndSetDefaults checks and sets defaults
@@ -169,9 +172,18 @@ func Find(cfg *Config) (*PingResponse, error) {
 	ctx, span := cfg.TraceProvider.Tracer("webclient").Start(cfg.Context, "webclient/Find")
 	defer span.End()
 
-	endpoint := fmt.Sprintf("https://%s/webapi/find", cfg.ProxyAddr)
+	endpoint := &url.URL{
+		Scheme: "https",
+		Host:   cfg.ProxyAddr,
+		Path:   "/webapi/find",
+	}
+	if cfg.UpdateGroup != "" {
+		endpoint.RawQuery = url.Values{
+			"group": []string{cfg.UpdateGroup},
+		}.Encode()
+	}
 
-	req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint.String(), nil)
 	if err != nil {
 		return nil, trace.Wrap(err)
 	}
@@ -205,12 +217,21 @@ func Ping(cfg *Config) (*PingResponse, error) {
 	ctx, span := cfg.TraceProvider.Tracer("webclient").Start(cfg.Context, "webclient/Ping")
 	defer span.End()
 
-	endpoint := fmt.Sprintf("https://%s/webapi/ping", cfg.ProxyAddr)
+	endpoint := &url.URL{
+		Scheme: "https",
+		Host:   cfg.ProxyAddr,
+		Path:   "/webapi/ping",
+	}
+	if cfg.UpdateGroup != "" {
+		endpoint.RawQuery = url.Values{
+			"group": []string{cfg.UpdateGroup},
+		}.Encode()
+	}
 	if cfg.ConnectorName != "" {
-		endpoint = fmt.Sprintf("%s/%s", endpoint, cfg.ConnectorName)
+		endpoint = endpoint.JoinPath(cfg.ConnectorName)
 	}
 
-	req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint.String(), nil)
 	if err != nil {
 		return nil, trace.Wrap(err)
 	}
diff --git a/lib/autoupdate/agent/testdata/TestUpdater_Enable/FIPS_and_Enterprise_flags.golden b/lib/autoupdate/agent/testdata/TestUpdater_Enable/FIPS_and_Enterprise_flags.golden
new file mode 100644
index 0000000000000..d9e09a2c95d71
--- /dev/null
+++ b/lib/autoupdate/agent/testdata/TestUpdater_Enable/FIPS_and_Enterprise_flags.golden
@@ -0,0 +1,10 @@
+version: v1
+kind: update_config
+spec:
+    proxy: localhost
+    group: ""
+    url_template: ""
+    enabled: true
+status:
+    active_version: 16.3.0
+    backup_version: ""
diff --git a/lib/autoupdate/agent/updater.go b/lib/autoupdate/agent/updater.go
index ade0704607cb9..7071f16e42d15 100644
--- a/lib/autoupdate/agent/updater.go
+++ b/lib/autoupdate/agent/updater.go
@@ -240,20 +240,26 @@ func (u *Updater) Enable(ctx context.Context, override OverrideConfig) error {
 	}
 
 	desiredVersion := override.ForceVersion
+	var flags InstallFlags
 	if desiredVersion == "" {
 		resp, err := webclient.Find(&webclient.Config{
-			Context:   ctx,
-			ProxyAddr: addr.Addr,
-			Insecure:  u.InsecureSkipVerify,
-			Timeout:   30 * time.Second,
-			//Group:     cfg.Spec.Group, // TODO(sclevine): add web API for verssion
-			Pool: u.Pool,
+			Context:     ctx,
+			ProxyAddr:   addr.Addr,
+			Insecure:    u.InsecureSkipVerify,
+			Timeout:     30 * time.Second,
+			UpdateGroup: cfg.Spec.Group,
+			Pool:        u.Pool,
 		})
 		if err != nil {
 			return trace.Errorf("failed to request version from proxy: %w", err)
 		}
-		desiredVersion, _ = "16.3.0", resp // TODO(sclevine): add web API for version
-		//desiredVersion := resp.AutoUpdate.AgentVersion
+		desiredVersion = resp.AutoUpdate.AgentVersion
+		if resp.Edition == "ent" {
+			flags |= FlagEnterprise
+		}
+		if resp.FIPS {
+			flags |= FlagFIPS
+		}
 	}
 
 	if desiredVersion == "" {
@@ -277,7 +283,7 @@ func (u *Updater) Enable(ctx context.Context, override OverrideConfig) error {
 	if template == "" {
 		template = cdnURITemplate
 	}
-	err = u.Installer.Install(ctx, desiredVersion, template, 0) // TODO(sclevine): add web API for flags
+	err = u.Installer.Install(ctx, desiredVersion, template, flags)
 	if err != nil {
 		return trace.Errorf("failed to install: %w", err)
 	}
diff --git a/lib/autoupdate/agent/updater_test.go b/lib/autoupdate/agent/updater_test.go
index d6d0128316c20..e817851fed1f7 100644
--- a/lib/autoupdate/agent/updater_test.go
+++ b/lib/autoupdate/agent/updater_test.go
@@ -20,6 +20,7 @@ package agent
 
 import (
 	"context"
+	"encoding/json"
 	"errors"
 	"net/http"
 	"net/http/httptest"
@@ -33,6 +34,7 @@ import (
 	"github.com/stretchr/testify/require"
 	"gopkg.in/yaml.v3"
 
+	"github.com/gravitational/teleport/api/client/webclient"
 	"github.com/gravitational/teleport/lib/utils/golden"
 )
 
@@ -129,10 +131,12 @@ func TestUpdater_Enable(t *testing.T) {
 		cfg        *UpdateConfig // nil -> file not present
 		userCfg    OverrideConfig
 		installErr error
+		flags      InstallFlags
 
 		removedVersion    string
 		installedVersion  string
 		installedTemplate string
+		requestGroup      string
 		errMatch          string
 	}{
 		{
@@ -150,6 +154,7 @@ func TestUpdater_Enable(t *testing.T) {
 			},
 			installedVersion:  "16.3.0",
 			installedTemplate: "https://example.com",
+			requestGroup:      "group",
 		},
 		{
 			name: "config from user",
@@ -255,6 +260,12 @@ func TestUpdater_Enable(t *testing.T) {
 			installedVersion:  "16.3.0",
 			installedTemplate: cdnURITemplate,
 		},
+		{
+			name:              "FIPS and Enterprise flags",
+			flags:             FlagEnterprise | FlagFIPS,
+			installedVersion:  "16.3.0",
+			installedTemplate: cdnURITemplate,
+		},
 		{
 			name:     "invalid metadata",
 			cfg:      &UpdateConfig{},
@@ -276,9 +287,20 @@ func TestUpdater_Enable(t *testing.T) {
 				require.NoError(t, err)
 			}
 
+			var requestedGroup string
 			server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-				// TODO(sclevine): add web API test including group verification
-				w.Write([]byte(`{}`))
+				requestedGroup = r.URL.Query().Get("group")
+				config := webclient.PingResponse{
+					AutoUpdate: webclient.AutoUpdateSettings{
+						AgentVersion: "16.3.0",
+					},
+				}
+				if tt.flags&FlagEnterprise != 0 {
+					config.Edition = "ent"
+				}
+				config.FIPS = tt.flags&FlagFIPS != 0
+				err := json.NewEncoder(w).Encode(config)
+				require.NoError(t, err)
 			}))
 			t.Cleanup(server.Close)
 
@@ -297,11 +319,13 @@ func TestUpdater_Enable(t *testing.T) {
 				installedTemplate string
 				linkedVersion     string
 				removedVersion    string
+				installedFlags    InstallFlags
 			)
 			updater.Installer = &testInstaller{
-				FuncInstall: func(_ context.Context, version, template string, _ InstallFlags) error {
+				FuncInstall: func(_ context.Context, version, template string, flags InstallFlags) error {
 					installedVersion = version
 					installedTemplate = template
+					installedFlags = flags
 					return tt.installErr
 				},
 				FuncLink: func(_ context.Context, version string) error {
@@ -329,6 +353,8 @@ func TestUpdater_Enable(t *testing.T) {
 			require.Equal(t, tt.installedTemplate, installedTemplate)
 			require.Equal(t, tt.installedVersion, linkedVersion)
 			require.Equal(t, tt.removedVersion, removedVersion)
+			require.Equal(t, tt.flags, installedFlags)
+			require.Equal(t, tt.requestGroup, requestedGroup)
 
 			data, err := os.ReadFile(cfgPath)
 			require.NoError(t, err)