diff --git a/.github/workflows/kube-integration-tests-non-root.yaml b/.github/workflows/kube-integration-tests-non-root.yaml index c0e70e2b90a3c..1250e216c55a7 100644 --- a/.github/workflows/kube-integration-tests-non-root.yaml +++ b/.github/workflows/kube-integration-tests-non-root.yaml @@ -9,6 +9,7 @@ on: env: TEST_KUBE: true KUBECONFIG: /home/.kube/config + ALPINE_VERSION: 3.20.3 jobs: changes: @@ -89,6 +90,38 @@ jobs: cp -r $HOME/.kube /home/ chown -R ci:ci /home/.kube + - name: Build Alpine image with webserver + run: | + + export SHORT_VERSION=${ALPINE_VERSION%.*} + + # download the alpine image + # store the files in the fixtures/alpine directory + # to avoid passing all the repository files to the docker build context. + cd ./fixtures/alpine + + # download alpine minirootfs, its signature, and its checksum + curl -fSsLO https://dl-cdn.alpinelinux.org/alpine/v$SHORT_VERSION/releases/x86_64/alpine-minirootfs-$ALPINE_VERSION-x86_64.tar.gz + curl -fSsLO https://dl-cdn.alpinelinux.org/alpine/v$SHORT_VERSION/releases/x86_64/alpine-minirootfs-$ALPINE_VERSION-x86_64.tar.gz.asc + curl -fSsLO https://dl-cdn.alpinelinux.org/alpine/v$SHORT_VERSION/releases/x86_64/alpine-minirootfs-$ALPINE_VERSION-x86_64.tar.gz.sha256 + + # verify the checksum + sha256sum -c alpine-minirootfs-$ALPINE_VERSION-x86_64.tar.gz.sha256 + + # verify the signature + gpg --import ./alpine-ncopa.at.alpinelinux.org.asc + gpg --verify ./alpine-minirootfs-$ALPINE_VERSION-x86_64.tar.gz.asc ./alpine-minirootfs-$ALPINE_VERSION-x86_64.tar.gz + + # build the webserver + CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o ./webserver ./webserver.go + + docker build -t alpine-webserver:v1 --build-arg=ALPINE_VERSION=$ALPINE_VERSION -f ./Dockerfile . + + # load the image into the kind cluster + kind load docker-image alpine-webserver:v1 + + cd - + - name: Run tests timeout-minutes: 40 run: | diff --git a/.gitignore b/.gitignore index c4bab5e3b225a..07370f91ac9a2 100644 --- a/.gitignore +++ b/.gitignore @@ -12,6 +12,8 @@ default.etcd # usually release tarballs get in the way *.gz +# ignore all tarballs except the alpine one +!fixtures/alpine/alpine-minirootfs-*.tar.gz *.zip # editors diff --git a/Makefile b/Makefile index 78768a2c0b0c7..83813f53c6fac 100644 --- a/Makefile +++ b/Makefile @@ -1474,7 +1474,7 @@ derive: .PHONY: derive-up-to-date derive-up-to-date: must-start-clean/host derive @if ! git diff --quiet; then \ - echo 'Please run make derive.'; \ + ./build.assets/please-run.sh "derived functions" "make derive"; \ exit 1; \ fi @@ -1508,15 +1508,15 @@ endif # Unlike protos-up-to-date, this target runs locally. .PHONY: protos-up-to-date/host protos-up-to-date/host: must-start-clean/host grpc/host @if ! git diff --quiet; then \ - echo 'Please run make grpc.'; \ + ./build.assets/please-run.sh "protos gRPC" "make grpc"; \ exit 1; \ fi .PHONY: must-start-clean/host must-start-clean/host: @if ! git diff --quiet; then \ echo 'This must be run from a repo with no unstaged commits.'; \ + git diff; \ exit 1; \ fi @@ -1525,7 +1530,12 @@ must-start-clean/host: crds-up-to-date: must-start-clean/host $(MAKE) -C integrations/operator manifests @if ! git diff --quiet; then \ - echo 'Please run make -C integrations/operator manifests.'; \ + ./build.assets/please-run.sh "operator CRD manifests" "make -C integrations/operator crd"; \ + exit 1; \ + fi + $(MAKE) -C integrations/operator crd-docs + @if ! 
git diff --quiet; then \ + ./build.assets/please-run.sh "operator CRD docs" "make -C integrations/operator crd"; \ + exit 1; \ + fi $(MAKE) -C integrations/operator crd-docs @@ -1540,8 +1545,7 @@ crds-up-to-date: must-start-clean/host terraform-resources-up-to-date: must-start-clean/host $(MAKE) -C integrations/terraform docs @if ! git diff --quiet; then \ - echo 'Please run make -C integrations/terraform docs.'; \ - git diff; \ + ./build.assets/please-run.sh "TF provider docs" "make -C integrations/terraform docs"; \ exit 1; \ fi diff --git a/build.assets/please-run.sh b/build.assets/please-run.sh new file mode 100755 index 0000000000000..236684efbb2b1 --- /dev/null +++ b/build.assets/please-run.sh @@ -0,0 +1,40 @@ +#!/bin/sh + +# This script is a helper that tells developers what generated content is out of date +# and which command to run. +# When running on GitHub Actions, the script will also create an error in the PR and +# collapse the diff to improve readability. + +set -eu + +# only echoes the string if we are in GitHub Actions (never fails under `set -e`) +echo_gha() { + [ -z "${GITHUB_ACTIONS+x}" ] || echo "$@" +} + +main() { + if [ $# -ne 2 ]; then + echo "Usage: $0 <kind> <generate-command>" >&2 + exit 1 + fi + + KIND="$1" + GENERATE_COMMAND="$2" + + TITLE="$KIND are out-of-date" + MESSAGE="Please run the command \`$GENERATE_COMMAND\`" + + # Create a GitHub error + echo_gha "::error file=Makefile,title=$TITLE::$MESSAGE" + + echo "=============" + echo "$TITLE" + echo "$MESSAGE" + echo "=============" + + echo_gha "::group::Diff output" + git diff || true + echo_gha "::endgroup::" +} + +main "$@" \ No newline at end of file diff --git a/docs/config.json b/docs/config.json index a937123578112..fdb039d4ab1a5 100644 --- a/docs/config.json +++ b/docs/config.json @@ -420,7 +420,7 @@ }, { "source": "/application-access/jwt/", - "destination": "/enroll-resources/application-access/jwt/", + "destination": "/enroll-resources/application-access/jwt/jwt/", "permanent": true }, { diff --git a/docs/pages/admin-guides/management/guides/guides.mdx b/docs/pages/admin-guides/management/guides/guides.mdx index bc817ac0bae83..db09c71368850 100644 --- a/docs/pages/admin-guides/management/guides/guides.mdx +++ b/docs/pages/admin-guides/management/guides/guides.mdx @@ -8,6 +8,8 @@ You can integrate Teleport with third-party tools in order to complete various tasks in your cluster. These guides describe Teleport integrations that are not documented elsewhere: + - [AWS OIDC Integration with Teleport](awsoidc-integration.mdx). How + to set up the AWS OIDC integration to allow Teleport to interact with AWS. - [EC2 tags as Teleport agent labels](ec2-tags.mdx). How to set up Teleport agent labels based on EC2 tags. - [GCP tags and labels as Teleport agent labels](gcp-tags.mdx). 
How to set up Teleport agent labels based on GCP tags and labels. diff --git a/e b/e index cc7fefb390f14..533cf69ca067d 160000 --- a/e +++ b/e @@ -1 +1 @@ -Subproject commit cc7fefb390f14edad07539c81264b327070e2558 +Subproject commit 533cf69ca067dc10c47891721eac9bbb1c61b87d diff --git a/fixtures/alpine/Dockerfile b/fixtures/alpine/Dockerfile new file mode 100644 index 0000000000000..4b66c356c5466 --- /dev/null +++ b/fixtures/alpine/Dockerfile @@ -0,0 +1,9 @@ +FROM scratch + +ARG ALPINE_VERSION + +ADD alpine-minirootfs-$ALPINE_VERSION-x86_64.tar.gz / + +COPY webserver /webserver + +CMD [ "/webserver" ] diff --git a/fixtures/alpine/README.md b/fixtures/alpine/README.md new file mode 100644 index 0000000000000..a6506854275fd --- /dev/null +++ b/fixtures/alpine/README.md @@ -0,0 +1,50 @@ +# `alpine-webserver:v1` Build Process + +## Source + +The `alpine-webserver:v1` image is based on `alpine` `minirootfs`, but instead of relying on Docker Hub's official Alpine image, we source the original files directly from Alpine's CDN. This approach mitigates issues with Docker Hub and GitHub Actions network failures, which have been a common cause of integration test failures. + +The build process is specified in the `.github/workflows/kube-integration-tests-non-root.yaml` file. + +## Download + +To download a new `alpine-minirootfs` image, run the following commands: + +```bash +$ export ALPINE_VERSION=3.20.3 +$ export SHORT_VERSION=${ALPINE_VERSION%.*} + +# download alpine minirootfs, its signature, and its checksum +$ curl -fSsLO https://dl-cdn.alpinelinux.org/alpine/v$SHORT_VERSION/releases/x86_64/alpine-minirootfs-$ALPINE_VERSION-x86_64.tar.gz +$ curl -fSsLO https://dl-cdn.alpinelinux.org/alpine/v$SHORT_VERSION/releases/x86_64/alpine-minirootfs-$ALPINE_VERSION-x86_64.tar.gz.asc +$ curl -fSsLO https://dl-cdn.alpinelinux.org/alpine/v$SHORT_VERSION/releases/x86_64/alpine-minirootfs-$ALPINE_VERSION-x86_64.tar.gz.sha256 + +``` + +## Source Validation + +The build process in `.github/workflows/kube-integration-tests-non-root.yaml` validates both the SHA-256 checksum and the GPG signature. The signature verification uses `alpine-ncopa.at.alpinelinux.org.asc`, which is the official public key used by Alpine Linux to sign its assets. This public key is available on the [Alpine Linux Downloads page](https://www.alpinelinux.org/downloads/). + +## Image Build Process + +The image is constructed from a scratch filesystem and incorporates only the necessary components to run the web server. Here’s the basic Dockerfile configuration: + +```Dockerfile +FROM scratch + +ARG ALPINE_VERSION + +ADD alpine-minirootfs-$ALPINE_VERSION-x86_64.tar.gz / + +COPY webserver /webserver + +CMD [ "/webserver" ] +``` + +This minimalist configuration ensures the image remains lightweight, secure, and tailored to only the required functionality. + +## Image Distribution + +After a successful build, the image is loaded into our `kind` cluster with the tag `alpine-webserver:v1`. + +Note: the `:latest` tag can't be used, otherwise Kubernetes will try to pull the image from Docker Hub and fail. 
\ No newline at end of file diff --git a/fixtures/alpine/alpine-ncopa.at.alpinelinux.org.asc b/fixtures/alpine/alpine-ncopa.at.alpinelinux.org.asc new file mode 100644 index 0000000000000..b4b34a5b3da96 --- /dev/null +++ b/fixtures/alpine/alpine-ncopa.at.alpinelinux.org.asc @@ -0,0 +1,52 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v2 + +mQINBFSIEDwBEADbib88gv1dBgeEez1TIh6A5lAzRl02JrdtYkDoPr5lQGYv0qKP +lWpd3jgGe8n90krGmT9W2nooRdyZjZ6UPbhYSJ+tub6VuKcrtwROXP2gNNqJA5j3 +vkXQ40725CVig7I3YCpzjsKRStwegZAelB8ZyC4zb15J7YvTVkd6qa/uuh8H21X2 +h/7IZJz50CMxyz8vkdyP2niIGZ4fPi0cVtsg8l4phbNJ5PwFOLMYl0b5geKMviyR +MxxQ33iNa9X+RcWeR751IQfax6xNcbOrxNRzfzm77fY4KzBezcnqJFnrl/p8qgBq +GHKmrrcjv2MF7dCWHGAPm1/vdPPjUpOcEOH4uGvX7P4w2qQ0WLBTDDO47/BiuY9A +DIwEF1afNXiJke4fmjDYMKA+HrnhocvI48VIX5C5+C5aJOKwN2EOpdXSvmsysTSt +gIc4ffcaYugfAIEn7ZdgcYmTlbIphHmOmOgt89J+6Kf9X6mVRmumI3cZWetf2FEV +fS9v24C2c8NRw3LESoDT0iiWsCHcsixCYqqvjzJBJ0TSEIVCZepOOBp8lfMl4YEZ +BVMzOx558LzbF2eR/XEsr3AX7Ga1jDu2N5WzIOa0YvJl1xcQxc0RZumaMlZ81dV/ +uu8G2+HTrJMZK933ov3pbxaZ38/CbCA90SBk5xqVqtTNAHpIkdGj90v2lwARAQAB +tCVOYXRhbmFlbCBDb3BhIDxuY29wYUBhbHBpbmVsaW51eC5vcmc+iQI2BBMBCAAg +BQJUiBA8AhsDBQsJCAcCBhUICQoLAgMWAgECHgECF4AACgkQKTrNCQfZSVrcNxAA +mEzX9PQaczzlPAlDe3m1AN0lP6E/1pYWLBGs6qGh18cWxdjyOWsO47nA1P+cTGSS +AYe4kIOIx9kp2SxObdKeZTuZCBdWfQu/cuRE12ugQQFERlpwVRNd6NYuT3WyZ7v8 +ZXRw4f33FIt4CSrW1/AyM/vrA+tWNo7bbwr/CFaIcL8kINPccdFOpWh14erONd/P +Eb3gO81yXIA6c1Vl4mce2JS0hd6EFohxS5yMQJMRIS/Zg8ufT3yHJXIaSnG+KRP7 +WWLR0ZaLraCykYi/EW9mmQ49LxQqvKOgjpRW9aNgDA+arKl1umjplkAFI1GZ0/qA +sgKm4agdvLGZiCZqDXcRWNolG5PeOUUpim1f59pGnupZ3Rbz4BF84U+1uL+yd0OR +5Y98AxWFyq0dqKz/zFYwQkMVnl9yW0pkJmP7r6PKj0bhWksQX+RjYPosj3wxPZ7i +SKMX7xZaqon/CHpH9/Xm8CabGcDITrS6h+h8x0FFT/MV/LKgc3q8E4mlXelew1Rt +xK4hzXFpXKl0WcQg54fj1Wqy47FlkArG50di0utCBGlmVZQA8nqE5oYkFLppiFXz +1SXCXojff/XZdNF2WdgV8aDKOYTK1WDPUSLmqY+ofOkQL49YqZ9M5FR8hMAbvL6e +4CbxVXCkWJ6Q9Lg79AzS3pvOXCJ/CUDQs7B30v026Ba5Ag0EVIgQPAEQAMHuPAv/ +B0KP9SEA1PsX5+37k46lTP7lv7VFd7VaD1rAUM/ZyD2fWgrJprcCPEpdMfuszfOH +jGVQ708VQ+vlD3vFoOZE+KgeKnzDG9FzYXXPmxkWzEEqI168ameF/LQhN12VF1mq +5LbukiAKx2ytb1I8onvCvNJDvH1D/3BxSj7ThV9bP/bFufcOHFBMFwtyBmUaR5Wx +96Bq+7DEbTrxhshoQgUqILEudUyhZa05/TrpUvC4f8qc0deaqJFO1zD6guZxRWZd +SWJdcFzTadyg36P4eyFMxa1Ft7BlDKdKLAFlCGgR0jfOnKRmdRKGRNFTLQ68aBld +N4wxBuMwe0tmRw9zYwWwD43Aq9E26YtuxVR1wb3zUmi+47QH4ANAzMioimE9Mj5S +qYrgzQJ0IGwIjBt+HNzHvYX+kyMuVFK41k2Vo6oUOVHuQMu3UgLvSPMsyw69d+Iw +K/rrsQwuutrvJ8Qcda3rea1HvWBVcY/uyoRsOsCS7itS6MK6KKTKaW8iskmEb2/h +Q1ZB1QaWm2sQ8Xcmb3QZgtyBfZKuC95T/mAXPT0uET6bTpP5DdEi3wFs+qw/c9FZ +SNDZ4hfNuS24d2u3Rh8LWt/U83ieAutNntOLGhvuZm1jLYt2KvzXE8cLt3V75/ZF +O+xEV7rLuOtrHKWlzgJQzsDp1gM4Tz9ULeY7ABEBAAGJAh8EGAEIAAkFAlSIEDwC +GwwACgkQKTrNCQfZSVrIgBAArhCdo3ItpuEKWcxx22oMwDm+0dmXmzqcPnB8y9Tf +NcocToIXP47H1+XEenZdTYZJOrdqzrK6Y1PplwQv6hqFToypgbQTeknrZ8SCDyEK +cU4id2r73THTzgNSiC4QAE214i5kKd6PMQn7XYVjsxvin3ZalS2x4m8UFal2C9nj +o8HqoTsDOSRy0mzoqAqXmeAe3X9pYme/CUwA6R8hHEgX7jUhm/ArVW5wZboAinw5 +BmKBjWiIwT1vxfvwgbC0EA1O24G4zQqEJ2ILmcM3RvWwtFFWasQqV7qnKdpD8EIb +oPa8Ocl7joDc5seK8BzsI7tXN4Yjw0aHCOlZ15fWHPYKgDFRQaRFffODPNbxQNiz +Yru3pbEWDLIUoQtJyKl+o2+8m4aWCYNzJ1WkEQje9RaBpHNDcyen5yC73tCEJsvT +ZuMI4Xqc4xgLt8woreKE57GRdg2fO8fO40X3R/J5YM6SqG7y2uwjVCHFBeO2Nkkr +8nOno+Rbn2b03c9MapMT4ll8jJds4xwhhpIjzPLWd2ZcX/ZGqmsnKPiroe9p1VPo +lN72Ohr9lS+OXfvOPV2N+Ar5rCObmhnYbXGgU/qyhk1qkRu+w2bBZOOQIdaCfh5A +Hbn3ZGGGQskgWZDFP4xZ3DWXFSWMPuvEjbmUn2xrh9oYsjsOGy9tyBFFySU2vyZP +Mkc= +=FcYC +-----END PGP PUBLIC KEY BLOCK----- diff --git a/fixtures/alpine/webserver.go b/fixtures/alpine/webserver.go new file mode 100644 index 
0000000000000..b41a818ffc18c --- /dev/null +++ b/fixtures/alpine/webserver.go @@ -0,0 +1,26 @@ +// Teleport +// Copyright (C) 2024 Gravitational, Inc. +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see <https://www.gnu.org/licenses/>. + +package main + +import "net/http" + +func main() { + http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("Hello, world!")) + }) + http.ListenAndServe(":80", nil) +} diff --git a/integration/hostuser_test.go b/integration/hostuser_test.go index 242908525cdf3..2f7a741e513f5 100644 --- a/integration/hostuser_test.go +++ b/integration/hostuser_test.go @@ -583,6 +583,28 @@ func TestRootHostUsers(t *testing.T) { require.NoError(t, err) require.False(t, hasExpirations) }) + + t.Run("Test migrate unmanaged user", func(t *testing.T) { + t.Cleanup(func() { cleanupUsersAndGroups([]string{testuser}, []string{types.TeleportKeepGroup}) }) + + users := srv.NewHostUsers(context.Background(), presence, "host_uuid") + _, err := host.UserAdd(testuser, nil, host.UserOpts{}) + require.NoError(t, err) + + closer, err := users.UpsertUser(testuser, services.HostUsersInfo{Mode: services.HostUserModeKeep, Groups: []string{types.TeleportKeepGroup}}) + require.NoError(t, err) + require.Nil(t, closer) + + u, err := user.Lookup(testuser) + require.NoError(t, err) + + gids, err := u.GroupIds() + require.NoError(t, err) + + keepGroup, err := user.LookupGroup(types.TeleportKeepGroup) + require.NoError(t, err) + require.Contains(t, gids, keepGroup.Gid) + }) } type hostUsersBackendWithExp struct { diff --git a/integration/kube_integration_test.go b/integration/kube_integration_test.go index d097a160583dc..14793b1b399e5 100644 --- a/integration/kube_integration_test.go +++ b/integration/kube_integration_test.go @@ -1473,7 +1473,7 @@ func testKubeEphemeralContainers(t *testing.T, suite *KubeSuite) { sessCreatorTerm := NewTerminal(250) group := &errgroup.Group{} group.Go(func() error { - cmd := []string{"/bin/sh", "echo", "hello from an ephemeral container"} + cmd := []string{"/bin/sh", "-c", "echo hello from an ephemeral container"} debugPod, _, err := generateDebugContainer(contName, cmd, pod) if err != nil { return trace.Wrap(err) } @@ -1578,7 +1578,7 @@ func generateDebugContainer(name string, cmd []string, pod *v1.Pod) (*v1.Pod, *v ec := &v1.EphemeralContainer{ EphemeralContainerCommon: v1.EphemeralContainerCommon{ Name: name, - Image: "alpine:latest", + Image: localPodImage, Command: cmd, ImagePullPolicy: v1.PullIfNotPresent, Stdin: true, @@ -1933,6 +1933,13 @@ func newNamespace(name string) *v1.Namespace { } } +// localPodImage is a container image that is used for testing. +// It's a docker image that runs a simple web server +// that listens on port 80 and returns "Hello, world!" on GET /. +// This image is vendored in the Teleport repository in the +// fixtures/alpine directory. Check the Dockerfile there for details. 
+const localPodImage = "alpine-webserver:v1" + func newPod(ns, name string) *v1.Pod { return &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -1942,7 +1949,7 @@ func newPod(ns, name string) *v1.Pod { Spec: v1.PodSpec{ Containers: []v1.Container{{ Name: "nginx", - Image: "nginx:alpine", + Image: localPodImage, }}, }, } diff --git a/integrations/operator/Makefile b/integrations/operator/Makefile index f22f4e5347ec6..ca7c7c234f929 100644 --- a/integrations/operator/Makefile +++ b/integrations/operator/Makefile @@ -72,6 +72,9 @@ help: ## Display this help. ##@ Development +.PHONY: crd +crd: crdgen crd-docs ## Single command to generate anything CRD-related (manifests and docs) + .PHONY: crdgen crdgen: ## Generate CRDs make -C crdgen @@ -139,6 +142,10 @@ test: export KUBEBUILDER_ASSETS=$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p test: go test ./... -coverprofile cover.out +.PHONY: echo-kubebuilder-assets +echo-kubebuilder-assets: + @echo KUBEBUILDER_ASSETS=$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path) + .PHONY: crdgen-test crdgen-test: ## Run crdgen tests. make -C crdgen test diff --git a/integrations/operator/apis/resources/v3/oidcconnector_types.go b/integrations/operator/apis/resources/v3/oidcconnector_types.go index 3eedf1d9b5264..d23f25f4fe6d6 100644 --- a/integrations/operator/apis/resources/v3/oidcconnector_types.go +++ b/integrations/operator/apis/resources/v3/oidcconnector_types.go @@ -21,6 +21,7 @@ package v3 import ( "encoding/json" + "github.com/gravitational/trace" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/gravitational/teleport/api/types" @@ -97,14 +98,49 @@ func (spec *TeleportOIDCConnectorSpec) DeepCopyInto(out *TeleportOIDCConnectorSp } } +// Custom json.Marshaler and json.Unmarshaler are here to cope with inconsistencies between our CRD and Go types. +// They are invoked when the kubernetes client converts the unstructured object into a typed resource. +// We have two inconsistencies: +// - the utils.Strings type that marshals inconsistently: single elements are strings, multiple elements are lists +// - the max_age setting, which is an embedded pointer to another single-value message, which breaks JSON parsing + // MarshalJSON serializes a spec into a JSON string func (spec TeleportOIDCConnectorSpec) MarshalJSON() ([]byte, error) { type Alias TeleportOIDCConnectorSpec + + var maxAge types.Duration + if spec.MaxAge != nil { + maxAge = spec.MaxAge.Value + } + return json.Marshal(&struct { - RedirectURLs []string `json:"redirect_url"` + RedirectURLs []string `json:"redirect_url,omitempty"` + MaxAge types.Duration `json:"max_age,omitempty"` Alias }{ RedirectURLs: spec.RedirectURLs, + MaxAge: maxAge, Alias: (Alias)(spec), }) } + +// UnmarshalJSON deserializes a JSON string into a spec. This override is required to deal with the +// MaxAge field, which is a special case because it's an object embedded into the spec. 
+func (spec *TeleportOIDCConnectorSpec) UnmarshalJSON(data []byte) error { + *spec = *new(TeleportOIDCConnectorSpec) + type Alias TeleportOIDCConnectorSpec + + temp := &struct { + MaxAge types.Duration `json:"max_age"` + *Alias + }{ + Alias: (*Alias)(spec), + } + if err := json.Unmarshal(data, &temp); err != nil { + return trace.Wrap(err, "unmarshalling custom teleport oidc connector spec") + } + if temp.MaxAge != 0 { + spec.MaxAge = &types.MaxAge{Value: temp.MaxAge} + } + return nil +} diff --git a/integrations/operator/apis/resources/v3/oidcconnector_types_test.go b/integrations/operator/apis/resources/v3/oidcconnector_types_test.go index c6abb53659989..5c511d5d82905 100644 --- a/integrations/operator/apis/resources/v3/oidcconnector_types_test.go +++ b/integrations/operator/apis/resources/v3/oidcconnector_types_test.go @@ -21,9 +21,11 @@ package v3 import ( "encoding/json" "testing" + "time" "github.com/stretchr/testify/require" + "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/api/types/wrappers" ) @@ -50,6 +52,11 @@ func TestTeleportOIDCConnectorSpec_MarshalJSON(t *testing.T) { TeleportOIDCConnectorSpec{RedirectURLs: wrappers.Strings{"foo", "bar"}}, `{"redirect_url":["foo","bar"],"issuer_url":"","client_id":"","client_secret":""}`, }, + { + "MaxAge", + TeleportOIDCConnectorSpec{MaxAge: &types.MaxAge{Value: types.Duration(time.Hour)}}, + `{"max_age":"1h0m0s","issuer_url":"","client_id":"","client_secret":""}`, + }, } for _, tc := range tests { tc := tc @@ -60,3 +67,39 @@ func TestTeleportOIDCConnectorSpec_MarshalJSON(t *testing.T) { }) } } +func TestTeleportOIDCConnectorSpec_UnmarshalJSON(t *testing.T) { + tests := []struct { + name string + expectedSpec TeleportOIDCConnectorSpec + inputJSON string + }{ + { + "Empty string", + TeleportOIDCConnectorSpec{RedirectURLs: wrappers.Strings{""}}, + `{"redirect_url":[""],"issuer_url":"","client_id":"","client_secret":""}`, + }, + { + "Single string", + TeleportOIDCConnectorSpec{RedirectURLs: wrappers.Strings{"foo"}}, + `{"redirect_url":["foo"],"issuer_url":"","client_id":"","client_secret":""}`, + }, + { + "Multiple strings", + TeleportOIDCConnectorSpec{RedirectURLs: wrappers.Strings{"foo", "bar"}}, + `{"redirect_url":["foo","bar"],"issuer_url":"","client_id":"","client_secret":""}`, + }, + { + "MaxAge", + TeleportOIDCConnectorSpec{MaxAge: &types.MaxAge{Value: types.Duration(time.Hour)}}, + `{"max_age":"1h0m0s","issuer_url":"","client_id":"","client_secret":""}`, + }, + } + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + var spec TeleportOIDCConnectorSpec + require.NoError(t, json.Unmarshal([]byte(tc.inputJSON), &spec)) + require.Equal(t, tc.expectedSpec, spec) + }) + } +} diff --git a/integrations/operator/controllers/resources/oidc_connector_controller_test.go b/integrations/operator/controllers/resources/oidc_connector_controller_test.go index 35228bc8188f7..39359c2704967 100644 --- a/integrations/operator/controllers/resources/oidc_connector_controller_test.go +++ b/integrations/operator/controllers/resources/oidc_connector_controller_test.go @@ -21,6 +21,7 @@ package resources_test import ( "context" "testing" + "time" "github.com/google/go-cmp/cmp" "github.com/gravitational/trace" @@ -46,6 +47,7 @@ var oidcSpec = types.OIDCConnectorSpecV3{ Roles: []string{"roleA"}, }}, RedirectURLs: []string{"https://redirect"}, + MaxAge: &types.MaxAge{Value: types.Duration(time.Hour)}, } type oidcTestingPrimitives struct { diff --git a/lib/auth/native/boring.go b/lib/auth/native/boring.go new 
file mode 100644 index 0000000000000..0c4a8dfc30ede --- /dev/null +++ b/lib/auth/native/boring.go @@ -0,0 +1,32 @@ +// Teleport +// Copyright (C) 2024 Gravitational, Inc. +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see <https://www.gnu.org/licenses/>. + +//go:build boringcrypto + +package native + +import "crypto/boring" + +// IsBoringBinary checks if the binary was compiled with BoringCrypto. +// +// It's possible to enable the boringcrypto GOEXPERIMENT (which will enable the +// boringcrypto build tag) even on platforms that don't support the boringcrypto +// module, which results in crypto packages being available and working, but not +// actually using a certified cryptographic module, so we have to check +// [boring.Enabled] even if this is compiled in. +func IsBoringBinary() bool { + return boring.Enabled() +} diff --git a/lib/auth/native/native.go b/lib/auth/native/native.go index f3b84d45de69a..6e1543cabc7ee 100644 --- a/lib/auth/native/native.go +++ b/lib/auth/native/native.go @@ -22,10 +22,8 @@ import ( "crypto/ed25519" "crypto/rand" "crypto/rsa" - "crypto/sha256" "crypto/x509" "encoding/pem" - "reflect" "sync" "testing" "time" @@ -48,15 +46,6 @@ var precomputedKeys = make(chan *rsa.PrivateKey, 25) // startPrecomputeOnce is used to start the background task that precomputes key pairs. var startPrecomputeOnce sync.Once -// IsBoringBinary checks if the binary was compiled with BoringCrypto. -func IsBoringBinary() bool { - // Check the package name for one of the boring primitives, if the package - // path is from BoringCrypto, we know this binary was compiled against the - // dev.boringcrypto branch of Go. - hash := sha256.New() - return reflect.TypeOf(hash).Elem().PkgPath() == "crypto/internal/boring" -} - // GenerateKeyPair generates a new RSA key pair. func GenerateKeyPair() ([]byte, []byte, error) { priv, err := GeneratePrivateKey() diff --git a/lib/auth/native/notboring.go b/lib/auth/native/notboring.go new file mode 100644 index 0000000000000..3fa57fb55e5cb --- /dev/null +++ b/lib/auth/native/notboring.go @@ -0,0 +1,27 @@ +// Teleport +// Copyright (C) 2024 Gravitational, Inc. +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see <https://www.gnu.org/licenses/>. + +//go:build !boringcrypto + +package native + +// IsBoringBinary checks if the binary was compiled with BoringCrypto. 
+// +// The boringcrypto GOEXPERIMENT always sets the boringcrypto build tag, so if +// this is compiled in, we're not using BoringCrypto. +func IsBoringBinary() bool { + return false +} diff --git a/lib/auth/webauthncli/fido2.go b/lib/auth/webauthncli/fido2.go index d3030ca211529..a20b714fc2fcb 100644 --- a/lib/auth/webauthncli/fido2.go +++ b/lib/auth/webauthncli/fido2.go @@ -25,6 +25,7 @@ import ( "context" "crypto/sha256" "encoding/base64" + "encoding/hex" "encoding/json" "errors" "fmt" @@ -220,7 +221,7 @@ func fido2Login( if uv { opts.UV = libfido2.True } - assertions, err := dev.Assertion(actualRPID, ccdHash[:], allowedCreds, pin, opts) + assertions, err := devAssertion(dev, info, actualRPID, ccdHash[:], allowedCreds, pin, opts) if errors.Is(err, libfido2.ErrUnsupportedOption) && uv && pin != "" { // Try again if we are getting "unsupported option" and the PIN is set. // Happens inconsistently in some authenticator series (YubiKey 5). @@ -228,7 +229,7 @@ func fido2Login( // authenticator will set the UV bit regardless of it being requested. log.Debugf("FIDO2: Device %v: retrying assertion without UV", info.path) opts.UV = libfido2.Default - assertions, err = dev.Assertion(actualRPID, ccdHash[:], allowedCreds, pin, opts) + assertions, err = devAssertion(dev, info, actualRPID, ccdHash[:], allowedCreds, pin, opts) } if errors.Is(err, libfido2.ErrNoCredentials) { // U2F devices error instantly with ErrNoCredentials. @@ -312,13 +313,75 @@ func usesAppID(dev FIDODevice, info *deviceInfo, ccdHash []byte, allowedCreds [] isRegistered := func(id string) bool { const pin = "" // Not necessary here. - _, err := dev.Assertion(id, ccdHash, allowedCreds, pin, opts) + _, err := devAssertion(dev, info, id, ccdHash, allowedCreds, pin, opts) return err == nil || (!info.fido2 && errors.Is(err, libfido2.ErrUserPresenceRequired)) } return isRegistered(appID) && !isRegistered(rpID) } +func devAssertion( + dev FIDODevice, + info *deviceInfo, + rpID string, + ccdHash []byte, + allowedCreds [][]byte, + pin string, + opts *libfido2.AssertionOpts, +) ([]*libfido2.Assertion, error) { + // Handle U2F devices separately when there is more than one allowed + // credential. + // This avoids "internal errors" on older Yubikey models (eg, FIDO U2F + // Security Key firmware 4.1.8). + if !info.fido2 && len(allowedCreds) > 1 { + cred, ok := findFirstKnownCredential(dev, info, rpID, ccdHash, allowedCreds) + if ok { + isCredentialCheck := pin == "" && opts != nil && opts.UP == libfido2.False + if isCredentialCheck { + // No need to assert again, reply as the U2F authenticator would. + return nil, trace.Wrap(libfido2.ErrUserPresenceRequired) + } + + if log.IsLevelEnabled(log.DebugLevel) { + credPrefix := hex.EncodeToString(cred) + const prefixLen = 10 + if len(credPrefix) > prefixLen { + credPrefix = credPrefix[:prefixLen] + } + log.Debugf("FIDO2: Device %v: Using credential %v...", info.path, credPrefix) + } + + allowedCreds = [][]byte{cred} + } + } + + assertion, err := dev.Assertion(rpID, ccdHash, allowedCreds, pin, opts) + return assertion, trace.Wrap(err) +} + +func findFirstKnownCredential( + dev FIDODevice, + info *deviceInfo, + rpID string, + ccdHash []byte, + allowedCreds [][]byte, +) ([]byte, bool) { + const pin = "" + opts := &libfido2.AssertionOpts{ + UP: libfido2.False, + } + for _, cred := range allowedCreds { + _, err := dev.Assertion(rpID, ccdHash, [][]byte{cred}, pin, opts) + // FIDO2 devices return err=nil on up=false queries; U2F devices return + // libfido2.ErrUserPresenceRequired. 
+ // https://github.com/Yubico/libfido2/blob/03c18d396eb209a42bbf62f5f4415203cba2fc50/src/u2f.c#L787-L791. + if err == nil || (!info.fido2 && errors.Is(err, libfido2.ErrUserPresenceRequired)) { + return cred, true + } + } + return nil, false +} + func pickAssertion( assertions []*libfido2.Assertion, prompt LoginPrompt, user string, passwordless bool, ) (*libfido2.Assertion, error) { @@ -452,7 +515,7 @@ func fido2Register( // Does the device hold an excluded credential? const pin = "" // not required to filter - switch _, err := dev.Assertion(rp.ID, ccdHash[:], excludeList, pin, &libfido2.AssertionOpts{ + switch _, err := devAssertion(dev, info, rp.ID, ccdHash[:], excludeList, pin, &libfido2.AssertionOpts{ UP: libfido2.False, }); { case errors.Is(err, libfido2.ErrNoCredentials): diff --git a/lib/auth/webauthncli/fido2_test.go b/lib/auth/webauthncli/fido2_test.go index 0e4486ab85db3..e1fc0890981d5 100644 --- a/lib/auth/webauthncli/fido2_test.go +++ b/lib/auth/webauthncli/fido2_test.go @@ -1954,6 +1954,102 @@ func TestFIDO2Register_u2fExcludedCredentials(t *testing.T) { require.NoError(t, err, "FIDO2Register errored, expected a successful registration") } +// TestFIDO2Login_u2fInternalError tests the scenario described by issue +// https://github.com/gravitational/teleport/issues/44912. +func TestFIDO2Login_u2fInternalError(t *testing.T) { + resetFIDO2AfterTests(t) + + dev1 := mustNewFIDO2Device("/dev1", "" /* pin */, &libfido2.DeviceInfo{ + Options: authOpts, + }) + dev2 := mustNewFIDO2Device("/dev2", "" /* pin */, &libfido2.DeviceInfo{ + Options: authOpts, + }) + u2fDev := mustNewFIDO2Device("/u2f", "" /* pin */, nil /* info */) + u2fDev.u2fOnly = true + u2fDev.errorOnUnknownCredential = true + + f2 := newFakeFIDO2(dev1, dev2, u2fDev) + f2.setCallbacks() + + const origin = "https://example.com" + ctx := context.Background() + + // Register all authenticators. + cc := &wantypes.CredentialCreation{ + Response: wantypes.PublicKeyCredentialCreationOptions{ + Challenge: make([]byte, 32), + RelyingParty: wantypes.RelyingPartyEntity{ + CredentialEntity: protocol.CredentialEntity{ + Name: "example.com", + }, + ID: "example.com", + }, + User: wantypes.UserEntity{ + CredentialEntity: protocol.CredentialEntity{ + Name: "alpaca", + }, + DisplayName: "Alpaca", + ID: []byte{1, 2, 3, 4, 5}, // arbitrary + }, + Parameters: []wantypes.CredentialParameter{ + {Type: protocol.PublicKeyCredentialType, Algorithm: webauthncose.AlgES256}, + }, + AuthenticatorSelection: wantypes.AuthenticatorSelection{ + RequireResidentKey: protocol.ResidentKeyNotRequired(), + ResidentKey: protocol.ResidentKeyRequirementDiscouraged, + UserVerification: protocol.VerificationDiscouraged, + }, + Attestation: protocol.PreferNoAttestation, + }, + } + allowedCreds := make([]wantypes.CredentialDescriptor, 0, len(f2.devices)) + for _, dev := range f2.devices { + ctx, cancel := context.WithTimeout(ctx, 1*time.Second) + mfaResp, err := wancli.FIDO2Register(ctx, origin, cc, dev) + cancel() + require.NoError(t, err, "FIDO2Register failed") + + allowedCreds = append(allowedCreds, wantypes.CredentialDescriptor{ + Type: protocol.PublicKeyCredentialType, + CredentialID: mfaResp.GetWebauthn().RawId, + }) + } + + // Sanity check: authenticator errors in the presence of unknown credentials. 
+ u2fDev.open() + _, err := u2fDev.Assertion( + "example.com", + []byte(`55cde2973243a946b85a477d2e164a35d2e4f3daaeb11ac5e9a1c4cf3297033e`), // clientDataHash + [][]byte{ + u2fDev.credentialID(), + bytes.Repeat([]byte("A"), 96), + }, + "", // pin + &libfido2.AssertionOpts{UP: libfido2.False}, + ) + require.ErrorIs(t, err, libfido2.ErrInternal, "u2fDev.Assert error mismatch") + u2fDev.Close() + + t.Run("login with multiple credentials", func(t *testing.T) { + assertion := &wantypes.CredentialAssertion{ + Response: wantypes.PublicKeyCredentialRequestOptions{ + Challenge: make([]byte, 32), + RelyingPartyID: "example.com", + AllowedCredentials: allowedCreds, + UserVerification: protocol.VerificationDiscouraged, + }, + } + + ctx, cancel := context.WithTimeout(ctx, 1*time.Second) + defer cancel() + _, _, err := wancli.FIDO2Login(ctx, origin, assertion, u2fDev, &wancli.LoginOpts{ + User: "alpaca", + }) + require.NoError(t, err, "FIDO2Login failed") + }) +} + func resetFIDO2AfterTests(t *testing.T) { pollInterval := wancli.FIDO2PollInterval devLocations := wancli.FIDODeviceLocations @@ -2015,6 +2111,10 @@ type fakeFIDO2Device struct { // Causes libfido2.ErrNotFIDO2 on Info. u2fOnly bool + // errorOnUnknownCredential makes the device fail assertions if an unknown + // credential is present. + errorOnUnknownCredential bool + // assertionErrors is a chain of errors to return from Assertion. // Errors are returned from start to end and removed, one-by-one, on each // invocation of the Assertion method. @@ -2291,6 +2391,9 @@ func (f *fakeFIDO2Device) Assertion( found = true break } + if f.errorOnUnknownCredential { + return nil, fmt.Errorf("failed to get assertion: %w", libfido2.ErrInternal) + } } if !found { return nil, libfido2.ErrNoCredentials @@ -2316,6 +2419,13 @@ func (f *fakeFIDO2Device) Assertion( credIDs := make(map[string]struct{}) for _, cred := range credentialIDs { credIDs[string(cred)] = struct{}{} + + // Simulate "internal error" on unknown credential handles. + // Sometimes happens with Yubikeys firmware 4.1.8. + // Requires a tap to happen. + if f.errorOnUnknownCredential && !bytes.Equal(cred, f.key.KeyHandle) { + return nil, fmt.Errorf("failed to get assertion: %w", libfido2.ErrInternal) + } } // Assemble one assertion for each allowed credential we hold. 
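The fix above boils down to probing each allowed credential individually with an up=false assertion and then asserting against only the credential the authenticator recognizes, so one stale handle in the allow list can no longer trip the U2F firmware's "internal error". Below is a minimal standalone sketch of that probing technique; the `device` type and the local error values are hypothetical stand-ins for the libfido2 wrapper and for `libfido2.ErrUserPresenceRequired`/`libfido2.ErrInternal`:

```go
package main

import (
	"errors"
	"fmt"
)

// Stand-ins for libfido2 sentinel errors (names are illustrative only).
var (
	// errUserPresenceRequired mirrors libfido2.ErrUserPresenceRequired: U2F-only
	// authenticators return it for an up=false probe of a credential they hold.
	errUserPresenceRequired = errors.New("user presence required")
	// errInternal mirrors libfido2.ErrInternal, which some U2F firmware (eg,
	// Yubikey 4.1.8) returns when the allow list contains an unknown handle.
	errInternal = errors.New("internal error")
)

// device is a hypothetical stand-in for the libfido2 device wrapper.
type device struct{ knownCred string }

// probe simulates an up=false assertion against a single allowed credential.
func (d *device) probe(cred string) error {
	if cred == d.knownCred {
		return errUserPresenceRequired // credential known, a tap would be required
	}
	return errInternal // unknown handle trips the firmware bug
}

// firstKnownCredential checks the allowed credentials one at a time, so a
// single unknown handle cannot fail the entire assertion.
func firstKnownCredential(d *device, allowed []string) (string, bool) {
	for _, cred := range allowed {
		err := d.probe(cred)
		if err == nil || errors.Is(err, errUserPresenceRequired) {
			return cred, true
		}
	}
	return "", false
}

func main() {
	dev := &device{knownCred: "cred-B"}
	// Asserting ["cred-A", "cred-B"] in one call would fail with errInternal
	// on affected firmware; probing finds the usable credential instead.
	if cred, ok := firstKnownCredential(dev, []string{"cred-A", "cred-B"}); ok {
		fmt.Println("asserting with:", cred) // asserting with: cred-B
	}
}
```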
diff --git a/lib/client/api.go b/lib/client/api.go index a3f33038d0f8d..c4e2d3f916c0c 100644 --- a/lib/client/api.go +++ b/lib/client/api.go @@ -2267,13 +2267,11 @@ func playSession(ctx context.Context, sessionID string, speed float64, streamer } playing = !playing case keyLeft, keyDown: - current := time.Duration(player.LastPlayed() * int64(time.Millisecond)) - player.SetPos(max(current-skipDuration, 0)) // rewind + player.SetPos(max(player.LastPlayed()-skipDuration, 0)) // rewind term.Clear() term.SetCursorPos(1, 1) case keyRight, keyUp: - current := time.Duration(player.LastPlayed() * int64(time.Millisecond)) - player.SetPos(current + skipDuration) // advance forward + player.SetPos(player.LastPlayed() + skipDuration) // advance forward } } }() diff --git a/lib/player/player.go b/lib/player/player.go index fa52f790d8611..d29bacc17acbe 100644 --- a/lib/player/player.go +++ b/lib/player/player.go @@ -62,7 +62,7 @@ type Player struct { advanceTo atomic.Int64 emit chan events.AuditEvent - wake chan int64 + wake chan time.Duration done chan struct{} // playPause holds a channel to be closed when @@ -81,7 +81,12 @@ type Player struct { translator sessionPrintTranslator } -const normalPlayback = math.MinInt64 +const ( + normalPlayback = time.Duration(0) + // MaxIdleTime defines the max idle time when skipping idle + // periods on the recording. + MaxIdleTime = 500 * time.Millisecond +) // Streamer is the underlying streamer that provides // access to recorded session events. @@ -134,18 +139,19 @@ func New(cfg *Config) (*Player, error) { } p := &Player{ - clock: clk, - log: log, - sessionID: cfg.SessionID, - streamer: cfg.Streamer, - emit: make(chan events.AuditEvent, 1024), - playPause: make(chan chan struct{}, 1), - wake: make(chan int64), - done: make(chan struct{}), + clock: clk, + log: log, + sessionID: cfg.SessionID, + streamer: cfg.Streamer, + emit: make(chan events.AuditEvent, 1024), + playPause: make(chan chan struct{}, 1), + wake: make(chan time.Duration), + done: make(chan struct{}), + skipIdleTime: cfg.SkipIdleTime, } p.speed.Store(float64(defaultPlaybackSpeed)) - p.advanceTo.Store(normalPlayback) + p.advanceTo.Store(int64(normalPlayback)) // start in a paused state p.playPause <- make(chan struct{}) @@ -183,7 +189,7 @@ func (p *Player) stream() { defer cancel() eventsC, errC := p.streamer.StreamSessionEvents(ctx, p.sessionID, 0) - lastDelay := int64(0) + var lastDelay time.Duration for { select { case <-p.done: @@ -215,7 +221,7 @@ func (p *Player) stream() { currentDelay := getDelay(evt) if currentDelay > 0 && currentDelay >= lastDelay { - switch adv := p.advanceTo.Load(); { + switch adv := time.Duration(p.advanceTo.Load()); { case adv >= currentDelay: // no timing delay necessary, we are fast forwarding break @@ -223,12 +229,12 @@ func (p *Player) stream() { // any negative value other than normalPlayback means // we rewind (by restarting the stream and seeking forward // to the rewind point) - p.advanceTo.Store(adv * -1) + p.advanceTo.Store(int64(adv) * -1) go p.stream() return default: if adv != normalPlayback { - p.advanceTo.Store(normalPlayback) + p.advanceTo.Store(int64(normalPlayback)) // we're catching back up to real time, so the delay // is calculated not from the last event but from the @@ -256,7 +262,7 @@ func (p *Player) stream() { // // TODO: consider a select with a timeout to detect blocked readers? 
p.emit <- evt - p.lastPlayed.Store(currentDelay) + p.lastPlayed.Store(int64(currentDelay)) } } } @@ -308,14 +314,14 @@ func (p *Player) SetPos(d time.Duration) error { if d == 0 { d = 1 * time.Millisecond } - if d.Milliseconds() < p.lastPlayed.Load() { + if d < time.Duration(p.lastPlayed.Load()) { d = -1 * d } - p.advanceTo.Store(d.Milliseconds()) + p.advanceTo.Store(int64(d)) // try to wake up the player if it's waiting to emit an event select { - case p.wake <- d.Milliseconds(): + case p.wake <- d: default: } @@ -332,18 +338,18 @@ // // A nil return value indicates that the delay has elapsed and that // the next event can be emitted. -func (p *Player) applyDelay(lastDelay, currentDelay int64) error { +func (p *Player) applyDelay(lastDelay, currentDelay time.Duration) error { loop: for { // TODO(zmb3): changing play speed during a long sleep // will not apply until after the sleep completes speed := p.speed.Load().(float64) - scaled := float64(currentDelay-lastDelay) / speed + scaled := time.Duration(float64(currentDelay-lastDelay) / speed) if p.skipIdleTime { - scaled = min(scaled, 500.0*float64(time.Millisecond)) + scaled = min(scaled, MaxIdleTime) } - timer := p.clock.NewTimer(time.Duration(scaled) * time.Millisecond) + timer := p.clock.NewTimer(scaled) defer timer.Stop() start := time.Now() @@ -357,7 +363,7 @@ loop: case newPos == interruptForPause: // the user paused playback while we were waiting to emit the next event: // 1) figure out how much of the sleep we completed - dur := float64(time.Since(start).Milliseconds()) * speed + dur := time.Duration(float64(time.Since(start)) * speed) // 2) wait here until the user resumes playback if err := p.waitWhilePaused(); errors.Is(err, errSeekWhilePaused) { @@ -369,7 +375,7 @@ // now that we're playing again, update our delay to account // for the portion that was already satisfied and apply the // remaining delay - lastDelay += int64(dur) + lastDelay += dur timer.Stop() continue loop case newPos > currentDelay: @@ -454,8 +460,8 @@ func (p *Player) waitWhilePaused() error { // LastPlayed returns the time of the last played event, -// expressed as milliseconds since the start of the session. -func (p *Player) LastPlayed() int64 { - return p.lastPlayed.Load() +// expressed as an offset from the start of the session. +func (p *Player) LastPlayed() time.Duration { + return time.Duration(p.lastPlayed.Load()) } // translateEvent translates events if applicable and return if they should be @@ -490,13 +496,13 @@ var databaseTranslators = map[string]newSessionPrintTranslatorFunc{ // player. 
var SupportedDatabaseProtocols = maps.Keys(databaseTranslators) -func getDelay(e events.AuditEvent) int64 { +func getDelay(e events.AuditEvent) time.Duration { switch x := e.(type) { case *events.DesktopRecording: - return x.DelayMilliseconds + return time.Duration(x.DelayMilliseconds) * time.Millisecond case *events.SessionPrint: - return x.DelayMilliseconds + return time.Duration(x.DelayMilliseconds) * time.Millisecond default: - return int64(0) + return time.Duration(0) } } diff --git a/lib/player/player_test.go b/lib/player/player_test.go index 836b58a506f89..83fac3bb32d97 100644 --- a/lib/player/player_test.go +++ b/lib/player/player_test.go @@ -26,6 +26,7 @@ import ( "time" "github.com/jonboulle/clockwork" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" apievents "github.com/gravitational/teleport/api/types/events" @@ -169,7 +170,7 @@ func TestClose(t *testing.T) { _, ok := <-p.C() require.False(t, ok, "player channel should have been closed") require.NoError(t, p.Err()) - require.Equal(t, int64(1000), p.LastPlayed()) + require.Equal(t, time.Second, p.LastPlayed()) } func TestSeekForward(t *testing.T) { @@ -321,6 +322,34 @@ func TestUseDatabaseTranslator(t *testing.T) { }) } +func TestSkipIdlePeriods(t *testing.T) { + eventCount := 3 + delayMilliseconds := 60000 + clk := clockwork.NewFakeClock() + p, err := player.New(&player.Config{ + Clock: clk, + SessionID: "test-session", + SkipIdleTime: true, + Streamer: &simpleStreamer{count: int64(eventCount), delay: int64(delayMilliseconds)}, + }) + require.NoError(t, err) + require.NoError(t, p.Play()) + + for i := range eventCount { + // Consume events in an eventually loop to avoid firing the clock + // events before the timer is set. + require.EventuallyWithT(t, func(t *assert.CollectT) { + clk.Advance(player.MaxIdleTime) + select { + case evt := <-p.C(): + assert.Equal(t, int64(i), evt.GetIndex()) + default: + assert.Fail(t, "expected to receive event after short period, but got nothing") + } + }, 3*time.Second, 100*time.Millisecond) + } +} + // simpleStreamer streams a fake session that contains // count events, emitted at a particular interval type simpleStreamer struct { diff --git a/lib/service/discovery.go b/lib/service/discovery.go index 845c9edbc4de2..b69f5f558994c 100644 --- a/lib/service/discovery.go +++ b/lib/service/discovery.go @@ -98,7 +98,8 @@ func (process *TeleportProcess) initDiscoveryService() error { Emitter: asyncEmitter, AccessPoint: accessPoint, ServerID: process.Config.HostUUID, - Log: process.log, + Log: process.logger, + LegacyLogger: process.log, ClusterName: conn.ClientIdentity.ClusterName, ClusterFeatures: process.GetClusterFeatures, PollInterval: process.Config.Discovery.PollInterval, diff --git a/lib/service/service.go b/lib/service/service.go index 7f0b89e1bc3b1..83c11173898ba 100644 --- a/lib/service/service.go +++ b/lib/service/service.go @@ -160,6 +160,7 @@ import ( "github.com/gravitational/teleport/lib/utils" awsutils "github.com/gravitational/teleport/lib/utils/aws" "github.com/gravitational/teleport/lib/utils/cert" + "github.com/gravitational/teleport/lib/utils/hostid" logutils "github.com/gravitational/teleport/lib/utils/log" vc "github.com/gravitational/teleport/lib/versioncontrol" "github.com/gravitational/teleport/lib/versioncontrol/endpoint" @@ -2830,7 +2831,7 @@ func (process *TeleportProcess) initSSH() error { storagePresence := local.NewPresenceService(process.storage.BackendStorage) // read the host UUID: - serverID, err := utils.ReadOrMakeHostUUID(cfg.DataDir) + 
serverID, err := hostid.ReadOrCreateFile(cfg.DataDir) if err != nil { return trace.Wrap(err) } @@ -4307,7 +4308,7 @@ func (process *TeleportProcess) initProxyEndpoint(conn *Connector) error { } // read the host UUID: - serverID, err := utils.ReadOrMakeHostUUID(cfg.DataDir) + serverID, err := hostid.ReadOrCreateFile(cfg.DataDir) if err != nil { return trace.Wrap(err) } @@ -6307,7 +6308,7 @@ func readOrGenerateHostID(ctx context.Context, cfg *servicecfg.Config, kubeBacke if err := persistHostIDToStorages(ctx, cfg, kubeBackend); err != nil { return trace.Wrap(err) } - } else if kubeBackend != nil && utils.HostUUIDExistsLocally(cfg.DataDir) { + } else if kubeBackend != nil && hostid.ExistsLocally(cfg.DataDir) { // This case is used when loading a Teleport pre-11 agent with storage attached. // In this case, we have to copy the "host_uuid" from the agent to the secret // in case storage is removed later. @@ -6346,14 +6347,14 @@ func readHostIDFromStorages(ctx context.Context, dataDir string, kubeBackend kub } // Even if running in Kubernetes fallback to local storage if `host_uuid` was // not found in secret. - hostID, err := utils.ReadHostUUID(dataDir) + hostID, err := hostid.ReadFile(dataDir) return hostID, trace.Wrap(err) } // persistHostIDToStorages writes the cfg.HostUUID to local data and to // Kubernetes Secret if this process is running on a Kubernetes Cluster. func persistHostIDToStorages(ctx context.Context, cfg *servicecfg.Config, kubeBackend kubernetesBackend) error { - if err := utils.WriteHostUUID(cfg.DataDir, cfg.HostUUID); err != nil { + if err := hostid.WriteFile(cfg.DataDir, cfg.HostUUID); err != nil { if errors.Is(err, fs.ErrPermission) { cfg.Logger.ErrorContext(ctx, "Teleport does not have permission to write to the data directory. Ensure that you are running as a user with appropriate permissions.", "data_dir", cfg.DataDir) } @@ -6372,7 +6373,7 @@ func persistHostIDToStorages(ctx context.Context, cfg *servicecfg.Config, kubeBa // loadHostIDFromKubeSecret reads the host_uuid from the Kubernetes secret with // the expected key: `/host_uuid`. func loadHostIDFromKubeSecret(ctx context.Context, kubeBackend kubernetesBackend) (string, error) { - item, err := kubeBackend.Get(ctx, backend.NewKey(utils.HostUUIDFile)) + item, err := kubeBackend.Get(ctx, backend.NewKey(hostid.FileName)) if err != nil { return "", trace.Wrap(err) } @@ -6385,7 +6386,7 @@ func writeHostIDToKubeSecret(ctx context.Context, kubeBackend kubernetesBackend, _, err := kubeBackend.Put( ctx, backend.Item{ - Key: backend.NewKey(utils.HostUUIDFile), + Key: backend.NewKey(hostid.FileName), Value: []byte(id), }, ) diff --git a/lib/service/service_test.go b/lib/service/service_test.go index b94d776627999..6c5cb7b606e4e 100644 --- a/lib/service/service_test.go +++ b/lib/service/service_test.go @@ -69,6 +69,7 @@ import ( "github.com/gravitational/teleport/lib/services" "github.com/gravitational/teleport/lib/services/local" "github.com/gravitational/teleport/lib/utils" + "github.com/gravitational/teleport/lib/utils/hostid" ) func TestMain(m *testing.M) { @@ -1177,7 +1178,7 @@ func Test_readOrGenerateHostID(t *testing.T) { dataDir := t.TempDir() // write host_uuid file to temp dir. 
if len(tt.args.hostIDContent) > 0 { - err := utils.WriteHostUUID(dataDir, tt.args.hostIDContent) + err := hostid.WriteFile(dataDir, tt.args.hostIDContent) require.NoError(t, err) } diff --git a/lib/srv/discovery/access_graph.go b/lib/srv/discovery/access_graph.go index f3ba28a2d479f..dc7fee7a29dd9 100644 --- a/lib/srv/discovery/access_graph.go +++ b/lib/srv/discovery/access_graph.go @@ -66,7 +66,7 @@ func (s *Server) reconcileAccessGraph(ctx context.Context, currentTAGResources * upsert, toDel := aws_sync.ReconcileResults(currentTAGResources, &aws_sync.Resources{}) if err := push(stream, upsert, toDel); err != nil { - s.Log.WithError(err).Error("Error pushing empty resources to TAGs") + s.Log.ErrorContext(ctx, "Error pushing empty resources to TAGs", "error", err) } return trace.Wrap(errNoAccessGraphFetchers) } @@ -109,7 +109,7 @@ func (s *Server) reconcileAccessGraph(ctx context.Context, currentTAGResources * // Aggregate all errors into a single error. err := trace.NewAggregate(errs...) if err != nil { - s.Log.WithError(err).Error("Error polling TAGs") + s.Log.ErrorContext(ctx, "Error polling TAGs", "error", err) } result := aws_sync.MergeResources(results...) // Merge all results into a single result @@ -122,7 +122,7 @@ func (s *Server) reconcileAccessGraph(ctx context.Context, currentTAGResources * } if pushErr != nil { - s.Log.WithError(pushErr).Error("Error pushing TAGs") + s.Log.ErrorContext(ctx, "Error pushing TAGs", "error", pushErr) return nil } // Update the currentTAGResources with the result of the reconciliation. @@ -135,7 +135,7 @@ func (s *Server) reconcileAccessGraph(ctx context.Context, currentTAGResources * }, }, }); err != nil { - s.Log.WithError(err).Error("Error submitting usage event") + s.Log.ErrorContext(ctx, "Error submitting usage event", "error", err) } return nil @@ -315,7 +315,7 @@ func (s *Server) initializeAndWatchAccessGraph(ctx context.Context, reloadCh <-c defer func() { lease.Stop() if err := lease.Wait(); err != nil { - s.Log.WithError(err).Warn("error cleaning up semaphore") + s.Log.WarnContext(ctx, "Error cleaning up semaphore", "error", err) } }() @@ -336,12 +336,12 @@ func (s *Server) initializeAndWatchAccessGraph(ctx context.Context, reloadCh <-c stream, err := client.AWSEventsStream(ctx) if err != nil { - s.Log.WithError(err).Error("Failed to get access graph service stream") + s.Log.ErrorContext(ctx, "Failed to get access graph service stream", "error", err) return trace.Wrap(err) } header, err := stream.Header() if err != nil { - s.Log.WithError(err).Error("Failed to get access graph service stream header") + s.Log.ErrorContext(ctx, "Failed to get access graph service stream header", "error", err) return trace.Wrap(err) } const ( @@ -361,7 +361,7 @@ func (s *Server) initializeAndWatchAccessGraph(ctx context.Context, reloadCh <-c defer wg.Done() defer cancel() if !accessGraphConn.WaitForStateChange(ctx, connectivity.Ready) { - s.Log.Info("access graph service connection was closed") + s.Log.InfoContext(ctx, "Access graph service connection was closed") } }() @@ -411,7 +411,7 @@ func grpcCredentials(config AccessGraphConfig, certs []tls.Certificate) (grpc.Di func (s *Server) initAccessGraphWatchers(ctx context.Context, cfg *Config) error { fetchers, err := s.accessGraphFetchersFromMatchers(ctx, cfg.Matchers, "" /* discoveryConfigName */) if err != nil { - s.Log.WithError(err).Error("Error initializing access graph fetchers") + s.Log.ErrorContext(ctx, "Error initializing access graph fetchers", "error", err) } s.staticTAGSyncFetchers = fetchers @@ 
-424,7 +424,7 @@ func (s *Server) initAccessGraphWatchers(ctx context.Context, cfg *Config) error // We will wait for the config to change and re-evaluate the fetchers // before starting the sync. if len(allFetchers) == 0 { - s.Log.Debug("No AWS sync fetchers configured. Access graph sync will not be enabled.") + s.Log.DebugContext(ctx, "No AWS sync fetchers configured. Access graph sync will not be enabled.") select { case <-ctx.Done(): return @@ -435,10 +435,10 @@ func (s *Server) initAccessGraphWatchers(ctx context.Context, cfg *Config) error } // reset the currentTAGResources to force a full sync if err := s.initializeAndWatchAccessGraph(ctx, reloadCh); errors.Is(err, errTAGFeatureNotEnabled) { - s.Log.Warn("Access Graph specified in config, but the license does not include Teleport Policy. Access graph sync will not be enabled.") + s.Log.WarnContext(ctx, "Access Graph specified in config, but the license does not include Teleport Policy. Access graph sync will not be enabled.") break } else if err != nil { - s.Log.Warnf("Error initializing and watching access graph: %v", err) + s.Log.WarnContext(ctx, "Error initializing and watching access graph", "error", err) } select { diff --git a/lib/srv/discovery/database_watcher.go b/lib/srv/discovery/database_watcher.go index c3ab1abb437bf..77b03d68113bb 100644 --- a/lib/srv/discovery/database_watcher.go +++ b/lib/srv/discovery/database_watcher.go @@ -52,7 +52,7 @@ func (s *Server) startDatabaseWatchers() error { defer mu.Unlock() return utils.FromSlice(newDatabases, types.Database.GetName) }, - Log: s.Log.WithField("kind", types.KindDatabase), + Log: s.LegacyLogger.WithField("kind", types.KindDatabase), OnCreate: s.onDatabaseCreate, OnUpdate: s.onDatabaseUpdate, OnDelete: s.onDatabaseDelete, @@ -64,7 +64,7 @@ func (s *Server) startDatabaseWatchers() error { watcher, err := common.NewWatcher(s.ctx, common.WatcherConfig{ FetchersFn: s.getAllDatabaseFetchers, - Log: s.Log.WithField("kind", types.KindDatabase), + Log: s.LegacyLogger.WithField("kind", types.KindDatabase), DiscoveryGroup: s.DiscoveryGroup, Interval: s.PollInterval, TriggerFetchC: s.newDiscoveryConfigChangedSub(), @@ -94,7 +94,7 @@ func (s *Server) startDatabaseWatchers() error { mu.Unlock() if err := reconciler.Reconcile(s.ctx); err != nil { - s.Log.WithError(err).Warn("Unable to reconcile database resources.") + s.Log.WarnContext(s.ctx, "Unable to reconcile database resources", "error", err) } else if s.onDatabaseReconcile != nil { s.onDatabaseReconcile() } @@ -126,7 +126,7 @@ func (s *Server) getAllDatabaseFetchers() []common.Fetcher { func (s *Server) getCurrentDatabases() map[string]types.Database { databases, err := s.AccessPoint.GetDatabases(s.ctx) if err != nil { - s.Log.WithError(err).Warn("Failed to get databases from cache.") + s.Log.WarnContext(s.ctx, "Failed to get databases from cache", "error", err) return nil } @@ -136,7 +136,7 @@ func (s *Server) getCurrentDatabases() map[string]types.Database { } func (s *Server) onDatabaseCreate(ctx context.Context, database types.Database) error { - s.Log.Debugf("Creating database %s.", database.GetName()) + s.Log.DebugContext(ctx, "Creating database", "database", database.GetName()) err := s.AccessPoint.CreateDatabase(ctx, database) // If the database already exists but has cloud origin and an empty // discovery group, then update it. 
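The logging hunks in this file and in access_graph.go all follow the same mechanical conversion: logrus's `WithError(err)`/`Debugf` chains become `slog` calls that take an explicit context and flat key/value attributes. A minimal sketch of the before/after pattern, using only the standard library; the `kind` attribute mirrors the `WithField("kind", types.KindDatabase)` calls above, and the logger/message names are illustrative:

```go
package main

import (
	"context"
	"errors"
	"log/slog"
	"os"
)

func main() {
	ctx := context.Background()

	// slog counterpart of the old logrus `WithField(...)` chain; the
	// resulting logger carries the attribute on every record it emits.
	log := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{
		Level: slog.LevelDebug,
	})).With("kind", "db")

	err := errors.New("connection refused")

	// Before: s.Log.WithError(err).Warn("Unable to reconcile database resources.")
	// After: the context is explicit and the error is a key/value attribute.
	log.WarnContext(ctx, "Unable to reconcile database resources", "error", err)

	// Before: s.Log.Debugf("Creating database %s.", name)
	// After: format verbs become structured attributes.
	log.DebugContext(ctx, "Creating database", "database", "example-db")
}
```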
@@ -161,18 +161,18 @@ func (s *Server) onDatabaseCreate(ctx context.Context, database types.Database) }, }) if err != nil { - s.Log.WithError(err).Debug("Error emitting usage event.") + s.Log.DebugContext(ctx, "Error emitting usage event", "error", err) } return nil } func (s *Server) onDatabaseUpdate(ctx context.Context, database, _ types.Database) error { - s.Log.Debugf("Updating database %s.", database.GetName()) + s.Log.DebugContext(ctx, "Updating database", "database", database.GetName()) return trace.Wrap(s.AccessPoint.UpdateDatabase(ctx, database)) } func (s *Server) onDatabaseDelete(ctx context.Context, database types.Database) error { - s.Log.Debugf("Deleting database %s.", database.GetName()) + s.Log.DebugContext(ctx, "Deleting database", "database", database.GetName()) return trace.Wrap(s.AccessPoint.DeleteDatabase(ctx, database.GetName())) } diff --git a/lib/srv/discovery/discovery.go b/lib/srv/discovery/discovery.go index 829b16c3c01f7..a27b9c18b45fe 100644 --- a/lib/srv/discovery/discovery.go +++ b/lib/srv/discovery/discovery.go @@ -23,6 +23,7 @@ import ( "crypto/tls" "errors" "fmt" + "log/slog" "slices" "strings" "sync" @@ -59,6 +60,7 @@ import ( aws_sync "github.com/gravitational/teleport/lib/srv/discovery/fetchers/aws-sync" "github.com/gravitational/teleport/lib/srv/discovery/fetchers/db" "github.com/gravitational/teleport/lib/srv/server" + logutils "github.com/gravitational/teleport/lib/utils/log" "github.com/gravitational/teleport/lib/utils/spreadwork" ) @@ -119,7 +121,10 @@ type Config struct { // AccessPoint is a discovery access point AccessPoint authclient.DiscoveryAccessPoint // Log is the logger. - Log logrus.FieldLogger + Log *slog.Logger + // LegacyLogger is the old logger + // Deprecated: use Log instead. + LegacyLogger logrus.FieldLogger // ServerID identifies the Teleport instance where this service runs. ServerID string // onDatabaseReconcile is called after each database resource reconciliation. @@ -222,7 +227,10 @@ kubernetes matchers are present.`) } if c.Log == nil { - c.Log = logrus.New() + c.Log = slog.Default() + } + if c.LegacyLogger == nil { + c.LegacyLogger = logrus.New() } if c.protocolChecker == nil { c.protocolChecker = fetchers.NewProtoChecker(false) @@ -243,11 +251,13 @@ kubernetes matchers are present.`) return trace.BadParameter("cluster features are required") } - c.Log = c.Log.WithField(teleport.ComponentKey, teleport.ComponentDiscovery) + c.Log = c.Log.With(teleport.ComponentKey, teleport.ComponentDiscovery) + c.LegacyLogger = c.LegacyLogger.WithField(teleport.ComponentKey, teleport.ComponentDiscovery) if c.DiscoveryGroup == "" { - c.Log.Warn("discovery_service.discovery_group is not set. This field is required for the discovery service to work properly.\n" + - "Please set discovery_service.discovery_group according to the documentation: https://goteleport.com/docs/reference/config/#discovery-service") + const warningMessage = "discovery_service.discovery_group is not set. This field is required for the discovery service to work properly.\n" + + "Please set discovery_service.discovery_group according to the documentation: https://goteleport.com/docs/reference/config/#discovery-service" + c.Log.WarnContext(context.Background(), warningMessage) } c.Matchers.Azure = services.SimplifyAzureMatchers(c.Matchers.Azure) @@ -497,7 +507,7 @@ func (s *Server) initAWSWatchers(matchers []types.AWSMatcher) error { _, otherMatchers = splitMatchers(otherMatchers, db.IsAWSMatcherType) // Add non-integration kube fetchers. 
- kubeFetchers, err := fetchers.MakeEKSFetchersFromAWSMatchers(s.Log, s.CloudClients, otherMatchers) + kubeFetchers, err := fetchers.MakeEKSFetchersFromAWSMatchers(s.LegacyLogger, s.CloudClients, otherMatchers) if err != nil { return trace.Wrap(err) } @@ -525,7 +535,7 @@ func (s *Server) initKubeAppWatchers(matchers []types.KubernetesMatcher) error { KubernetesClient: kubeClient, FilterLabels: matcher.Labels, Namespaces: matcher.Namespaces, - Log: s.Log, + Log: s.LegacyLogger, ClusterName: s.DiscoveryGroup, ProtocolChecker: s.Config.protocolChecker, }) @@ -623,7 +633,7 @@ func (s *Server) kubeFetchersFromMatchers(matchers Matchers) ([]common.Fetcher, return matcherType == types.AWSMatcherEKS }) if len(awsKubeMatchers) > 0 { - eksFetchers, err := fetchers.MakeEKSFetchersFromAWSMatchers(s.Log, s.CloudClients, awsKubeMatchers) + eksFetchers, err := fetchers.MakeEKSFetchersFromAWSMatchers(s.LegacyLogger, s.CloudClients, awsKubeMatchers) if err != nil { return nil, trace.Wrap(err) } @@ -681,7 +691,7 @@ func (s *Server) initAzureWatchers(ctx context.Context, matchers []types.AzureMa Regions: matcher.Regions, FilterLabels: matcher.ResourceTags, ResourceGroups: matcher.ResourceGroups, - Log: s.Log, + Log: s.LegacyLogger, }) if err != nil { return trace.Wrap(err) @@ -762,7 +772,7 @@ func (s *Server) initGCPWatchers(ctx context.Context, matchers []types.GCPMatche Location: location, FilterLabels: matcher.GetLabels(), ProjectID: projectID, - Log: s.Log, + Log: s.LegacyLogger, }) if err != nil { return trace.Wrap(err) @@ -875,7 +885,7 @@ func (s *Server) handleEC2Instances(instances *server.EC2Instances) error { } if err := s.emitUsageEvents(instances.MakeEvents()); err != nil { - s.Log.WithError(err).Debug("Error emitting usage event.") + s.Log.DebugContext(s.ctx, "Error emitting usage event", "error", err) } return nil @@ -894,7 +904,7 @@ func (s *Server) heartbeatEICEInstance(instances *server.EC2Instances) { for _, ec2Instance := range instances.Instances { eiceNode, err := common.NewAWSNodeFromEC2v1Instance(ec2Instance.OriginalInstance, awsInfo) if err != nil { - s.Log.WithField("instance_id", ec2Instance.InstanceID).Warnf("Error converting to Teleport EICE Node: %v", err) + s.Log.WarnContext(s.ctx, "Error converting to Teleport EICE Node", "error", err, "instance_id", ec2Instance.InstanceID) s.awsEC2ResourcesStatus.incrementFailed(awsResourceGroup{ discoveryConfig: instances.DiscoveryConfig, @@ -905,7 +915,7 @@ func (s *Server) heartbeatEICEInstance(instances *server.EC2Instances) { existingNode, err := s.nodeWatcher.GetNode(s.ctx, eiceNode.GetName()) if err != nil && !trace.IsNotFound(err) { - s.Log.Warnf("Error finding the existing node with name %q: %v", eiceNode.GetName(), err) + s.Log.WarnContext(s.ctx, "Error finding the existing node", "node_name", eiceNode.GetName(), "error", err) continue } @@ -937,7 +947,7 @@ func (s *Server) heartbeatEICEInstance(instances *server.EC2Instances) { err := spreadwork.ApplyOverTime(s.ctx, applyOverTimeConfig, nodesToUpsert, func(eiceNode types.Server) { if _, err := s.AccessPoint.UpsertNode(s.ctx, eiceNode); err != nil { instanceID := eiceNode.GetAWSInstanceID() - s.Log.WithField("instance_id", instanceID).Warnf("Error upserting EC2 instance: %v", err) + s.Log.WarnContext(s.ctx, "Error upserting EC2 instance", "instance_id", instanceID, "error", err) s.awsEC2ResourcesStatus.incrementFailed(awsResourceGroup{ discoveryConfig: instances.DiscoveryConfig, integration: instances.Integration, @@ -945,7 +955,7 @@ func (s *Server) heartbeatEICEInstance(instances 
*server.EC2Instances) { } }) if err != nil { - s.Log.Warnf("Failed to upsert EC2 nodes: %v", err) + s.Log.WarnContext(s.ctx, "Failed to upsert EC2 nodes", "error", err) } } @@ -959,8 +969,7 @@ func (s *Server) handleEC2RemoteInstallation(instances *server.EC2Instances) err return trace.Wrap(err) } - s.Log.Debugf("Running Teleport installation on these instances: AccountID: %s, Instances: %s", - instances.AccountID, genEC2InstancesLogStr(instances.Instances)) + s.Log.DebugContext(s.ctx, "Running Teleport installation on instances", "account_id", instances.AccountID, "instances", genEC2InstancesLogStr(instances.Instances)) req := server.SSMRunRequest{ DocumentName: instances.DocumentName, @@ -1005,11 +1014,17 @@ func (s *Server) handleEC2RemoteInstallation(instances *server.EC2Instances) err func (s *Server) logHandleInstancesErr(err error) { var aErr awserr.Error if errors.As(err, &aErr) && aErr.Code() == ssm.ErrCodeInvalidInstanceId { - s.Log.WithError(err).Error("SSM SendCommand failed with ErrCodeInvalidInstanceId. Make sure that the instances have AmazonSSMManagedInstanceCore policy assigned. Also check that SSM agent is running and registered with the SSM endpoint on that instance and try restarting or reinstalling it in case of issues. See https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_SendCommand.html#API_SendCommand_Errors for more details.") + const errorMessage = "SSM SendCommand failed with ErrCodeInvalidInstanceId. " + + "Make sure that the instances have AmazonSSMManagedInstanceCore policy assigned. " + + "Also check that SSM agent is running and registered with the SSM endpoint on that instance and try restarting or reinstalling it in case of issues. " + + "See https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_SendCommand.html#API_SendCommand_Errors for more details." 
+ s.Log.ErrorContext(s.ctx, + errorMessage, + "error", err) } else if trace.IsNotFound(err) { - s.Log.Debug("All discovered EC2 instances are already part of the cluster.") + s.Log.DebugContext(s.ctx, "All discovered EC2 instances are already part of the cluster") } else { - s.Log.WithError(err).Error("Failed to enroll discovered EC2 instances.") + s.Log.ErrorContext(s.ctx, "Failed to enroll discovered EC2 instances", "error", err) } } @@ -1022,13 +1037,13 @@ func (s *Server) watchCARotation(ctx context.Context) { nodes, err := s.findUnrotatedEC2Nodes(ctx) if err != nil { if trace.IsNotFound(err) { - s.Log.Debug("No OpenSSH nodes require CA rotation") + s.Log.DebugContext(ctx, "No OpenSSH nodes require CA rotation") continue } - s.Log.Errorf("Error finding OpenSSH nodes requiring CA rotation: %s", err) + s.Log.ErrorContext(ctx, "Error finding OpenSSH nodes requiring CA rotation", "error", err) continue } - s.Log.Debugf("Found %d nodes requiring rotation", len(nodes)) + s.Log.DebugContext(ctx, "Found nodes requiring rotation", "nodes_count", len(nodes)) s.caRotationCh <- nodes case <-s.ctx.Done(): return @@ -1085,7 +1100,7 @@ func (s *Server) findUnrotatedEC2Nodes(ctx context.Context) ([]types.Server, err func (s *Server) handleEC2Discovery() { if err := s.nodeWatcher.WaitInitialization(); err != nil { - s.Log.WithError(err).Error("Failed to initialize nodeWatcher.") + s.Log.ErrorContext(s.ctx, "Failed to initialize nodeWatcher", "error", err) return } @@ -1096,8 +1111,7 @@ func (s *Server) handleEC2Discovery() { select { case instances := <-s.ec2Watcher.InstancesC: ec2Instances := instances.EC2 - s.Log.Debugf("EC2 instances discovered (AccountID: %s, Instances: %v), starting installation", - ec2Instances.AccountID, genEC2InstancesLogStr(ec2Instances.Instances)) + s.Log.DebugContext(s.ctx, "EC2 instances discovered, starting installation", "account_id", ec2Instances.AccountID, "instances", genEC2InstancesLogStr(ec2Instances.Instances)) s.awsEC2ResourcesStatus.incrementFound(awsResourceGroup{ discoveryConfig: instances.EC2.DiscoveryConfig, @@ -1155,9 +1169,7 @@ func (s *Server) handleAzureInstances(instances *server.AzureInstances) error { return trace.Wrap(errNoInstances) } - s.Log.Debugf("Running Teleport installation on these virtual machines: SubscriptionID: %s, VMs: %s", - instances.SubscriptionID, genAzureInstancesLogStr(instances.Instances), - ) + s.Log.DebugContext(s.ctx, "Running Teleport installation on virtual machines", "subscription_id", instances.SubscriptionID, "vms", genAzureInstancesLogStr(instances.Instances)) req := server.AzureRunRequest{ Client: client, Instances: instances.Instances, @@ -1172,14 +1184,14 @@ func (s *Server) handleAzureInstances(instances *server.AzureInstances) error { return trace.Wrap(err) } if err := s.emitUsageEvents(instances.MakeEvents()); err != nil { - s.Log.WithError(err).Debug("Error emitting usage event.") + s.Log.DebugContext(s.ctx, "Error emitting usage event", "error", err) } return nil } func (s *Server) handleAzureDiscovery() { if err := s.nodeWatcher.WaitInitialization(); err != nil { - s.Log.WithError(err).Error("Failed to initialize nodeWatcher.") + s.Log.ErrorContext(s.ctx, "Failed to initialize nodeWatcher", "error", err) return } @@ -1188,14 +1200,12 @@ func (s *Server) handleAzureDiscovery() { select { case instances := <-s.azureWatcher.InstancesC: azureInstances := instances.Azure - s.Log.Debugf("Azure instances discovered (SubscriptionID: %s, Instances: %v), starting installation", - azureInstances.SubscriptionID, 
genAzureInstancesLogStr(azureInstances.Instances), - ) + s.Log.DebugContext(s.ctx, "Azure instances discovered, starting installation", "subscription_id", azureInstances.SubscriptionID, "instances", genAzureInstancesLogStr(azureInstances.Instances)) if err := s.handleAzureInstances(azureInstances); err != nil { if errors.Is(err, errNoInstances) { - s.Log.Debug("All discovered Azure VMs are already part of the cluster.") + s.Log.DebugContext(s.ctx, "All discovered Azure VMs are already part of the cluster") } else { - s.Log.WithError(err).Error("Failed to enroll discovered Azure VMs.") + s.Log.ErrorContext(s.ctx, "Failed to enroll discovered Azure VMs", "error", err) } } case <-s.ctx.Done(): @@ -1241,9 +1251,7 @@ func (s *Server) handleGCPInstances(instances *server.GCPInstances) error { return trace.Wrap(errNoInstances) } - s.Log.Debugf("Running Teleport installation on these virtual machines: ProjectID: %s, VMs: %s", - instances.ProjectID, genGCPInstancesLogStr(instances.Instances), - ) + s.Log.DebugContext(s.ctx, "Running Teleport installation on virtual machines", "project_id", instances.ProjectID, "vms", genGCPInstancesLogStr(instances.Instances)) req := server.GCPRunRequest{ Client: client, Instances: instances.Instances, @@ -1257,14 +1265,14 @@ func (s *Server) handleGCPInstances(instances *server.GCPInstances) error { return trace.Wrap(err) } if err := s.emitUsageEvents(instances.MakeEvents()); err != nil { - s.Log.WithError(err).Debug("Error emitting usage event.") + s.Log.DebugContext(s.ctx, "Error emitting usage event", "error", err) } return nil } func (s *Server) handleGCPDiscovery() { if err := s.nodeWatcher.WaitInitialization(); err != nil { - s.Log.WithError(err).Error("Failed to initialize nodeWatcher.") + s.Log.ErrorContext(s.ctx, "Failed to initialize nodeWatcher", "error", err) return } go s.gcpWatcher.Run() @@ -1272,14 +1280,12 @@ func (s *Server) handleGCPDiscovery() { select { case instances := <-s.gcpWatcher.InstancesC: gcpInstances := instances.GCP - s.Log.Debugf("GCP instances discovered (ProjectID: %s, Instances %v), starting installation", - gcpInstances.ProjectID, genGCPInstancesLogStr(gcpInstances.Instances), - ) + s.Log.DebugContext(s.ctx, "GCP instances discovered, starting installation", "project_id", gcpInstances.ProjectID, "instances", genGCPInstancesLogStr(gcpInstances.Instances)) if err := s.handleGCPInstances(gcpInstances); err != nil { if errors.Is(err, errNoInstances) { - s.Log.Debug("All discovered GCP VMs are already part of the cluster.") + s.Log.DebugContext(s.ctx, "All discovered GCP VMs are already part of the cluster") } else { - s.Log.WithError(err).Error("Failed to enroll discovered GCP VMs.") + s.Log.ErrorContext(s.ctx, "Failed to enroll discovered GCP VMs", "error", err) } } case <-s.ctx.Done(): @@ -1342,7 +1348,7 @@ func (s *Server) submitFetchEvent(cloudProvider, resourceType string) { }, }) if err != nil { - s.Log.WithError(err).Debug("Error emitting discovery fetch event.") + s.Log.DebugContext(s.ctx, "Error emitting discovery fetch event", "error", err) } } @@ -1435,7 +1441,7 @@ func (s *Server) loadExistingDynamicDiscoveryConfigs() error { for { dcs, respNextKey, err := s.AccessPoint.ListDiscoveryConfigs(s.ctx, 0, nextKey) if err != nil { - s.Log.WithError(err).Warnf("failed to list discovery configs") + s.Log.WarnContext(s.ctx, "Failed to list discovery configs", "error", err) return trace.Wrap(err) } @@ -1444,7 +1450,7 @@ func (s *Server) loadExistingDynamicDiscoveryConfigs() error { continue } if err := 
s.upsertDynamicMatchers(s.ctx, dc); err != nil { - s.Log.WithError(err).Warnf("failed to update dynamic matchers for discovery config %q", dc.GetName()) + s.Log.WarnContext(s.ctx, "Failed to update dynamic matchers for discovery config", "discovery_config", dc.GetName(), "error", err) continue } s.dynamicDiscoveryConfig[dc.GetName()] = dc @@ -1470,7 +1476,7 @@ func (s *Server) startDynamicWatcherUpdater() { case types.OpPut: dc, ok := event.Resource.(*discoveryconfig.DiscoveryConfig) if !ok { - s.Log.Warnf("dynamic matcher watcher: unexpected resource type %T", event.Resource) + s.Log.WarnContext(s.ctx, "Dynamic matcher watcher: unexpected resource type", "expected", logutils.TypeAttr(dc), "got", logutils.TypeAttr(event.Resource)) return } @@ -1498,7 +1504,7 @@ func (s *Server) startDynamicWatcherUpdater() { } if err := s.upsertDynamicMatchers(s.ctx, dc); err != nil { - s.Log.WithError(err).Warnf("failed to update dynamic matchers for discovery config %q", dc.GetName()) + s.Log.WarnContext(s.ctx, "Failed to update dynamic matchers for discovery config", "discovery_config", dc.GetName(), "error", err) continue } s.dynamicDiscoveryConfig[dc.GetName()] = dc @@ -1515,10 +1521,10 @@ func (s *Server) startDynamicWatcherUpdater() { delete(s.dynamicDiscoveryConfig, name) s.notifyDiscoveryConfigChanged() default: - s.Log.Warnf("Skipping unknown event type %s", event.Type) + s.Log.WarnContext(s.ctx, "Skipping unknown event type", "got", event.Type) } case <-s.dynamicMatcherWatcher.Done(): - s.Log.Warnf("dynamic matcher watcher error: %v", s.dynamicMatcherWatcher.Error()) + s.Log.WarnContext(s.ctx, "Dynamic matcher watcher error", "error", s.dynamicMatcherWatcher.Error()) return } } @@ -1650,7 +1656,7 @@ func (s *Server) discardUnsupportedMatchers(m *Matchers) { validAWSMatchers := make([]types.AWSMatcher, 0, len(m.AWS)) for i, m := range m.AWS { if m.Integration == "" { - s.Log.Warnf("discarding AWS matcher [%d] - missing integration", i) + s.Log.WarnContext(s.ctx, "Discarding AWS matcher - missing integration", "matcher_pos", i) continue } validAWSMatchers = append(validAWSMatchers, m) @@ -1658,17 +1664,17 @@ func (s *Server) discardUnsupportedMatchers(m *Matchers) { m.AWS = validAWSMatchers if len(m.GCP) > 0 { - s.Log.Warnf("discarding GCP matchers - missing integration") + s.Log.WarnContext(s.ctx, "Discarding GCP matchers - missing integration") m.GCP = []types.GCPMatcher{} } if len(m.Azure) > 0 { - s.Log.Warnf("discarding Azure matchers - missing integration") + s.Log.WarnContext(s.ctx, "Discarding Azure matchers - missing integration") m.Azure = []types.AzureMatcher{} } if len(m.Kubernetes) > 0 { - s.Log.Warnf("discarding Kubernetes matchers - missing integration") + s.Log.WarnContext(s.ctx, "Discarding Kubernetes matchers - missing integration") m.Kubernetes = []types.KubernetesMatcher{} } } @@ -1687,7 +1693,7 @@ func (s *Server) Stop() { } if s.dynamicMatcherWatcher != nil { if err := s.dynamicMatcherWatcher.Close(); err != nil { - s.Log.Warnf("dynamic matcher watcher closing error: ", trace.Wrap(err)) + s.Log.WarnContext(s.ctx, "Dynamic matcher watcher closing error", "error", err) } } } @@ -1719,7 +1725,7 @@ func (s *Server) initTeleportNodeWatcher() (err error) { s.nodeWatcher, err = services.NewNodeWatcher(s.ctx, services.NodeWatcherConfig{ ResourceWatcherConfig: services.ResourceWatcherConfig{ Component: teleport.ComponentDiscovery, - Log: s.Log, + Log: s.LegacyLogger, Client: s.AccessPoint, MaxStaleness: time.Minute, }, diff --git a/lib/srv/discovery/discovery_test.go
b/lib/srv/discovery/discovery_test.go index 100973a56e242..84e2d9e7bcf01 100644 --- a/lib/srv/discovery/discovery_test.go +++ b/lib/srv/discovery/discovery_test.go @@ -87,6 +87,7 @@ import ( "github.com/gravitational/teleport/lib/srv/discovery/common" "github.com/gravitational/teleport/lib/srv/server" usagereporter "github.com/gravitational/teleport/lib/usagereporter/teleport" + libutils "github.com/gravitational/teleport/lib/utils" ) func TestMain(m *testing.M) { @@ -679,7 +680,9 @@ func TestDiscoveryServer(t *testing.T) { require.NoError(t, err) } - logger := logrus.New() + legacyLogger := logrus.New() + logger := libutils.NewSlogLoggerForTests() + reporter := &mockUsageReporter{} installer := &mockSSMInstaller{ installedInstances: make(map[string]struct{}), @@ -700,6 +703,7 @@ func TestDiscoveryServer(t *testing.T) { Matchers: tc.staticMatchers, Emitter: tc.emitter, Log: logger, + LegacyLogger: legacyLogger, DiscoveryGroup: defaultDiscoveryGroup, clock: fakeClock, }) @@ -759,7 +763,8 @@ func TestDiscoveryServer(t *testing.T) { func TestDiscoveryServerConcurrency(t *testing.T) { t.Parallel() ctx := context.Background() - logger := logrus.New() + legacyLogger := logrus.New() + logger := libutils.NewSlogLoggerForTests() defaultDiscoveryGroup := "dg01" awsMatcher := types.AWSMatcher{ @@ -839,6 +844,7 @@ func TestDiscoveryServerConcurrency(t *testing.T) { Matchers: staticMatcher, Emitter: emitter, Log: logger, + LegacyLogger: legacyLogger, DiscoveryGroup: defaultDiscoveryGroup, }) require.NoError(t, err) @@ -1336,9 +1342,11 @@ func TestDiscoveryInCloudKube(t *testing.T) { require.NoError(t, w.Close()) }) - logger := logrus.New() - logger.SetOutput(w) - logger.SetLevel(logrus.DebugLevel) + legacyLogger := logrus.New() + logger := libutils.NewSlogLoggerForTests() + + legacyLogger.SetOutput(w) + legacyLogger.SetLevel(logrus.DebugLevel) clustersNotUpdated := make(chan string, 10) go func() { // reconcileRegexp is the regex extractor of a log message emitted by reconciler when @@ -1377,6 +1385,7 @@ func TestDiscoveryInCloudKube(t *testing.T) { }, Emitter: authClient, Log: logger, + LegacyLogger: legacyLogger, DiscoveryGroup: mainDiscoveryGroup, }) @@ -2668,7 +2677,9 @@ func TestAzureVMDiscovery(t *testing.T) { require.NoError(t, err) } - logger := logrus.New() + legacyLogger := logrus.New() + logger := libutils.NewSlogLoggerForTests() + emitter := &mockEmitter{} reporter := &mockUsageReporter{} installer := &mockAzureInstaller{ @@ -2683,6 +2694,7 @@ func TestAzureVMDiscovery(t *testing.T) { Matchers: tc.staticMatchers, Emitter: emitter, Log: logger, + LegacyLogger: legacyLogger, DiscoveryGroup: defaultDiscoveryGroup, }) @@ -2974,7 +2986,8 @@ func TestGCPVMDiscovery(t *testing.T) { require.NoError(t, err) } - logger := logrus.New() + legacyLogger := logrus.New() + logger := libutils.NewSlogLoggerForTests() emitter := &mockEmitter{} reporter := &mockUsageReporter{} installer := &mockGCPInstaller{ @@ -2989,6 +3002,7 @@ func TestGCPVMDiscovery(t *testing.T) { Matchers: tc.staticMatchers, Emitter: emitter, Log: logger, + LegacyLogger: legacyLogger, DiscoveryGroup: defaultDiscoveryGroup, }) @@ -3035,7 +3049,8 @@ func TestServer_onCreate(t *testing.T) { Config: &Config{ DiscoveryGroup: "test-cluster", AccessPoint: accessPoint, - Log: logrus.New(), + Log: libutils.NewSlogLoggerForTests(), + LegacyLogger: logrus.New(), }, } diff --git a/lib/srv/discovery/kube_integration_watcher.go b/lib/srv/discovery/kube_integration_watcher.go index 58c7228b4f031..d8efaceda4bf8 100644 --- 
a/lib/srv/discovery/kube_integration_watcher.go +++ b/lib/srv/discovery/kube_integration_watcher.go @@ -68,7 +68,7 @@ func (s *Server) startKubeIntegrationWatchers() error { s.submitFetchersEvent(kubeIntegrationFetchers) return kubeIntegrationFetchers }, - Log: s.Log.WithField("kind", types.KindKubernetesCluster), + Log: s.LegacyLogger.WithField("kind", types.KindKubernetesCluster), DiscoveryGroup: s.DiscoveryGroup, Interval: s.PollInterval, Origin: types.OriginCloud, @@ -88,13 +88,13 @@ func (s *Server) startKubeIntegrationWatchers() error { existingServers, err := clt.GetKubernetesServers(s.ctx) if err != nil { - s.Log.WithError(err).Warn("Failed to get Kubernetes servers from cache.") + s.Log.WarnContext(s.ctx, "Failed to get Kubernetes servers from cache", "error", err) continue } existingClusters, err := clt.GetKubernetesClusters(s.ctx) if err != nil { - s.Log.WithError(err).Warn("Failed to get Kubernetes clusters from cache.") + s.Log.WarnContext(s.ctx, "Failed to get Kubernetes clusters from cache", "error", err) continue } @@ -120,7 +120,7 @@ func (s *Server) startKubeIntegrationWatchers() error { agentVersion, err := s.getKubeAgentVersion(releaseChannels) if err != nil { - s.Log.WithError(err).Warn("Could not get agent version to enroll EKS clusters") + s.Log.WarnContext(s.ctx, "Could not get agent version to enroll EKS clusters", "error", err) continue } @@ -195,19 +195,19 @@ func (s *Server) enrollEKSClusters(region, integration string, clusters []types. AgentVersion: agentVersion, }) if err != nil { - s.Log.WithError(err).Errorf("failed to enroll EKS clusters %v", clusterNames) + s.Log.ErrorContext(ctx, "Failed to enroll EKS clusters", "cluster_names", clusterNames, "error", err) continue } for _, r := range rsp.Results { if r.Error != "" { if !strings.Contains(r.Error, "teleport-kube-agent is already installed on the cluster") { - s.Log.Errorf("failed to enroll EKS cluster %q: %s", r.EksClusterName, r.Error) + s.Log.ErrorContext(ctx, "Failed to enroll EKS cluster", "cluster_name", r.EksClusterName, "error", r.Error) } else { - s.Log.Debugf("EKS cluster %q already has installed kube agent", r.EksClusterName) + s.Log.DebugContext(ctx, "EKS cluster already has installed kube agent", "cluster_name", r.EksClusterName) } } else { - s.Log.Infof("successfully enrolled EKS cluster %q", r.EksClusterName) + s.Log.InfoContext(ctx, "Successfully enrolled EKS cluster", "cluster_name", r.EksClusterName) } } } diff --git a/lib/srv/discovery/kube_integration_watcher_test.go b/lib/srv/discovery/kube_integration_watcher_test.go index f6cab69c9ec46..556796981c996 100644 --- a/lib/srv/discovery/kube_integration_watcher_test.go +++ b/lib/srv/discovery/kube_integration_watcher_test.go @@ -52,6 +52,7 @@ import ( "github.com/gravitational/teleport/lib/services" "github.com/gravitational/teleport/lib/srv/discovery/common" "github.com/gravitational/teleport/lib/srv/discovery/fetchers" + libutils "github.com/gravitational/teleport/lib/utils" ) func TestServer_getKubeFetchers(t *testing.T) { @@ -380,7 +381,8 @@ func TestDiscoveryKubeIntegrationEKS(t *testing.T) { AWS: tc.awsMatchers, }, Emitter: authClient, - Log: logrus.New(), + Log: libutils.NewSlogLoggerForTests(), + LegacyLogger: logrus.New(), DiscoveryGroup: mainDiscoveryGroup, }) diff --git a/lib/srv/discovery/kube_services_watcher.go b/lib/srv/discovery/kube_services_watcher.go index eb6d68cc964f7..8a80aea590b89 100644 --- a/lib/srv/discovery/kube_services_watcher.go +++ b/lib/srv/discovery/kube_services_watcher.go @@ -50,7 +50,7 @@ func (s *Server)
startKubeAppsWatchers() error { GetCurrentResources: func() map[string]types.Application { apps, err := s.AccessPoint.GetApps(s.ctx) if err != nil { - s.Log.WithError(err).Warn("Unable to get applications from cache.") + s.Log.WarnContext(s.ctx, "Unable to get applications from cache", "error", err) return nil } @@ -61,7 +61,7 @@ func (s *Server) startKubeAppsWatchers() error { defer mu.Unlock() return utils.FromSlice(appResources, types.Application.GetName) }, - Log: s.Log.WithField("kind", types.KindApp), + Log: s.LegacyLogger.WithField("kind", types.KindApp), OnCreate: s.onAppCreate, OnUpdate: s.onAppUpdate, OnDelete: s.onAppDelete, @@ -74,7 +74,7 @@ func (s *Server) startKubeAppsWatchers() error { watcher, err := common.NewWatcher(s.ctx, common.WatcherConfig{ FetchersFn: common.StaticFetchers(s.kubeAppsFetchers), Interval: 5 * time.Minute, - Log: s.Log.WithField("kind", types.KindApp), + Log: s.LegacyLogger.WithField("kind", types.KindApp), DiscoveryGroup: s.DiscoveryGroup, Origin: types.OriginDiscoveryKubernetes, }) @@ -102,7 +102,7 @@ func (s *Server) startKubeAppsWatchers() error { mu.Unlock() if err := reconciler.Reconcile(s.ctx); err != nil { - s.Log.WithError(err).Warn("Unable to reconcile resources.") + s.Log.WarnContext(s.ctx, "Unable to reconcile resources", "error", err) } case <-s.ctx.Done(): @@ -114,7 +114,7 @@ func (s *Server) startKubeAppsWatchers() error { } func (s *Server) onAppCreate(ctx context.Context, app types.Application) error { - s.Log.Debugf("Creating app %s", app.GetName()) + s.Log.DebugContext(ctx, "Creating app", "app_name", app.GetName()) err := s.AccessPoint.CreateApp(ctx, app) // If the resource already exists, it means that the resource was created // by a previous discovery_service instance that didn't support the discovery @@ -139,17 +139,17 @@ func (s *Server) onAppCreate(ctx context.Context, app types.Application) error { }, }) if err != nil { - s.Log.WithError(err).Debug("Error emitting usage event.") + s.Log.DebugContext(ctx, "Error emitting usage event", "error", err) } return nil } func (s *Server) onAppUpdate(ctx context.Context, app, _ types.Application) error { - s.Log.Debugf("Updating app %s.", app.GetName()) + s.Log.DebugContext(ctx, "Updating app", "app_name", app.GetName()) return trace.Wrap(s.AccessPoint.UpdateApp(ctx, app)) } func (s *Server) onAppDelete(ctx context.Context, app types.Application) error { - s.Log.Debugf("Deleting app %s.", app.GetName()) + s.Log.DebugContext(ctx, "Deleting app", "app_name", app.GetName()) return trace.Wrap(s.AccessPoint.DeleteApp(ctx, app.GetName())) } diff --git a/lib/srv/discovery/kube_watcher.go b/lib/srv/discovery/kube_watcher.go index e18cc23e68c99..5247ff213b2e7 100644 --- a/lib/srv/discovery/kube_watcher.go +++ b/lib/srv/discovery/kube_watcher.go @@ -49,7 +49,7 @@ func (s *Server) startKubeWatchers() error { GetCurrentResources: func() map[string]types.KubeCluster { kcs, err := s.AccessPoint.GetKubernetesClusters(s.ctx) if err != nil { - s.Log.WithError(err).Warn("Unable to get Kubernetes clusters from cache.") + s.Log.WarnContext(s.ctx, "Unable to get Kubernetes clusters from cache", "error", err) return nil } @@ -60,7 +60,7 @@ func (s *Server) startKubeWatchers() error { defer mu.Unlock() return utils.FromSlice(kubeResources, types.KubeCluster.GetName) }, - Log: s.Log.WithField("kind", types.KindKubernetesCluster), + Log: s.LegacyLogger.WithField("kind", types.KindKubernetesCluster), OnCreate: s.onKubeCreate, OnUpdate: s.onKubeUpdate, OnDelete: s.onKubeDelete, @@ -76,7 +76,7 @@ func (s *Server) 
startKubeWatchers() error { s.submitFetchersEvent(kubeNonIntegrationFetchers) return kubeNonIntegrationFetchers }, - Log: s.Log.WithField("kind", types.KindKubernetesCluster), + Log: s.LegacyLogger.WithField("kind", types.KindKubernetesCluster), DiscoveryGroup: s.DiscoveryGroup, Interval: s.PollInterval, Origin: types.OriginCloud, @@ -106,7 +106,7 @@ func (s *Server) startKubeWatchers() error { mu.Unlock() if err := reconciler.Reconcile(s.ctx); err != nil { - s.Log.WithError(err).Warn("Unable to reconcile resources.") + s.Log.WarnContext(s.ctx, "Unable to reconcile resources", "error", err) } case <-s.ctx.Done(): @@ -118,7 +118,7 @@ func (s *Server) startKubeWatchers() error { } func (s *Server) onKubeCreate(ctx context.Context, kubeCluster types.KubeCluster) error { - s.Log.Debugf("Creating kube_cluster %s.", kubeCluster.GetName()) + s.Log.DebugContext(ctx, "Creating kube_cluster", "kube_cluster_name", kubeCluster.GetName()) err := s.AccessPoint.CreateKubernetesCluster(ctx, kubeCluster) // If the kube already exists but has an empty discovery group, update it. if err != nil { @@ -138,17 +138,17 @@ func (s *Server) onKubeCreate(ctx context.Context, kubeCluster types.KubeCluster }, }) if err != nil { - s.Log.WithError(err).Debug("Error emitting usage event.") + s.Log.DebugContext(ctx, "Error emitting usage event", "error", err) } return nil } func (s *Server) onKubeUpdate(ctx context.Context, kubeCluster, _ types.KubeCluster) error { - s.Log.Debugf("Updating kube_cluster %s.", kubeCluster.GetName()) + s.Log.DebugContext(ctx, "Updating kube_cluster", "kube_cluster_name", kubeCluster.GetName()) return trace.Wrap(s.AccessPoint.UpdateKubernetesCluster(ctx, kubeCluster)) } func (s *Server) onKubeDelete(ctx context.Context, kubeCluster types.KubeCluster) error { - s.Log.Debugf("Deleting kube_cluster %s.", kubeCluster.GetName()) + s.Log.DebugContext(ctx, "Deleting kube_cluster", "kube_cluster_name", kubeCluster.GetName()) return trace.Wrap(s.AccessPoint.DeleteKubernetesCluster(ctx, kubeCluster.GetName())) } diff --git a/lib/srv/discovery/reconciler.go b/lib/srv/discovery/reconciler.go index 26b17410e1bd6..dd9dc1d605f9c 100644 --- a/lib/srv/discovery/reconciler.go +++ b/lib/srv/discovery/reconciler.go @@ -20,12 +20,12 @@ package discovery import ( "context" + "log/slog" "sync" "time" "github.com/gravitational/trace" "github.com/jonboulle/clockwork" - "github.com/sirupsen/logrus" "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/api/utils/retryutils" @@ -46,7 +46,7 @@ type serverInfoUpserter interface { type labelReconcilerConfig struct { clock clockwork.Clock - log logrus.FieldLogger + log *slog.Logger accessPoint serverInfoUpserter } @@ -58,7 +58,7 @@ func (c *labelReconcilerConfig) checkAndSetDefaults() error { c.clock = clockwork.NewRealClock() } if c.log == nil { - c.log = logrus.New() + c.log = slog.Default() } return nil } @@ -124,7 +124,7 @@ func (r *labelReconciler) run(ctx context.Context) { for _, si := range batch { if err := r.cfg.accessPoint.UpsertServerInfo(ctx, si); err != nil { - r.cfg.log.WithError(err).Error("Failed to upsert server info.") + r.cfg.log.ErrorContext(ctx, "Failed to upsert server info", "error", err) // Allow the server info to be queued again. 
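// Deleting the entry from r.discoveredServers clears its queued state,
// so a subsequent discovery pass re-adds the server and the upsert is retried.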
delete(r.discoveredServers, si.GetName()) } diff --git a/lib/srv/discovery/status.go b/lib/srv/discovery/status.go index 7fe0b0f39398d..321619bb02636 100644 --- a/lib/srv/discovery/status.go +++ b/lib/srv/discovery/status.go @@ -25,7 +25,6 @@ import ( "time" "github.com/gravitational/trace" - "github.com/sirupsen/logrus" "google.golang.org/protobuf/types/known/timestamppb" discoveryconfigv1 "github.com/gravitational/teleport/api/gen/proto/go/teleport/discoveryconfig/v1" @@ -45,6 +44,12 @@ import ( // - AWS Sync (TAG) status // - AWS EC2 Auto Discover status func (s *Server) updateDiscoveryConfigStatus(discoveryConfigName string) { + // Static configurations (i.e. those in `teleport.yaml/discovery_config..matchers`) do not have a DiscoveryConfig resource. + // Those are discarded because there's no Status to update. + if discoveryConfigName == "" { + return + } + discoveryConfigStatus := discoveryconfig.Status{ State: discoveryconfigv1.DiscoveryConfigState_DISCOVERY_CONFIG_STATE_SYNCING.String(), LastSyncTime: s.clock.Now(), @@ -63,9 +68,9 @@ func (s *Server) updateDiscoveryConfigStatus(discoveryConfigName string) { _, err := s.AccessPoint.UpdateDiscoveryConfigStatus(ctx, discoveryConfigName, discoveryConfigStatus) switch { case trace.IsNotImplemented(err): - s.Log.Warn("UpdateDiscoveryConfigStatus method is not implemented in Auth Server. Please upgrade it to a recent version.") + s.Log.WarnContext(ctx, "UpdateDiscoveryConfigStatus method is not implemented in Auth Server. Please upgrade it to a recent version.") case err != nil: - s.Log.WithError(err).WithField("discovery_config_name", discoveryConfigName).Info("Error updating discovery config status") + s.Log.InfoContext(ctx, "Error updating discovery config status", "discovery_config_name", discoveryConfigName, "error", err) } } @@ -422,7 +427,7 @@ func (s *Server) acquireSemaphoreForUserTask(userTaskName string) (releaseFn fun cancel() lease.Stop() if err := lease.Wait(); err != nil { - s.Log.WithError(err).WithField("semaphore", userTaskName).Warn("error cleaning up UserTask semaphore") + s.Log.WarnContext(ctx, "Error cleaning up UserTask semaphore", "semaphore", userTaskName, "error", err) } } @@ -522,13 +527,13 @@ func (s *Server) upsertTasksForAWSEC2FailedEnrollments() { } if err := s.mergeUpsertDiscoverEC2Task(g, instancesIssueByID); err != nil { - s.Log.WithError(err).WithFields(logrus.Fields{ - "integration": g.integration, - "issue_type": g.issueType, - "aws_account_id": g.accountID, - "aws_region": g.region, - }, - ).Warning("Failed to create discover ec2 user task.", g.integration, g.issueType, g.accountID, g.region) + s.Log.WarnContext(s.ctx, "Failed to create discover ec2 user task", + "integration", g.integration, + "issue_type", g.issueType, + "aws_account_id", g.accountID, + "aws_region", g.region, + "error", err, + ) continue } diff --git a/lib/srv/regular/sshserver.go b/lib/srv/regular/sshserver.go index df0e87f4abb48..6425e1dab27b7 100644 --- a/lib/srv/regular/sshserver.go +++ b/lib/srv/regular/sshserver.go @@ -72,6 +72,7 @@ import ( "github.com/gravitational/teleport/lib/sshutils/x11" "github.com/gravitational/teleport/lib/teleagent" "github.com/gravitational/teleport/lib/utils" + "github.com/gravitational/teleport/lib/utils/hostid" "github.com/gravitational/teleport/lib/utils/uds" ) @@ -726,7 +727,7 @@ func New( options ...ServerOption, ) (*Server, error) { // read the host UUID: - uuid, err := utils.ReadOrMakeHostUUID(dataDir) + uuid, err := hostid.ReadOrCreateFile(dataDir) if err != nil { return nil,
trace.Wrap(err) } diff --git a/lib/srv/usermgmt.go b/lib/srv/usermgmt.go index a4460afbc9529..e12bf8c8c633b 100644 --- a/lib/srv/usermgmt.go +++ b/lib/srv/usermgmt.go @@ -688,6 +688,7 @@ func (u *HostUserManagement) getHostUser(username string) (*HostUser, error) { return &HostUser{ Name: username, UID: usr.Uid, + GID: usr.Gid, Home: usr.HomeDir, Groups: groups, }, trace.NewAggregate(groupErrs...) diff --git a/lib/teleterm/services/connectmycomputer/connectmycomputer.go b/lib/teleterm/services/connectmycomputer/connectmycomputer.go index 1cc0f8914a052..26ecc8aafe8d9 100644 --- a/lib/teleterm/services/connectmycomputer/connectmycomputer.go +++ b/lib/teleterm/services/connectmycomputer/connectmycomputer.go @@ -41,6 +41,7 @@ import ( "github.com/gravitational/teleport/lib/defaults" "github.com/gravitational/teleport/lib/teleterm/clusters" "github.com/gravitational/teleport/lib/utils" + "github.com/gravitational/teleport/lib/utils/hostid" ) type RoleSetup struct { @@ -395,7 +396,7 @@ func (n *NodeJoinWait) getNodeNameFromHostUUIDFile(ctx context.Context, cluster // the file is empty. // // Here we need to be able to distinguish between both of those two cases. - out, err := utils.ReadPath(utils.GetHostUUIDPath(dataDir)) + out, err := utils.ReadPath(hostid.GetPath(dataDir)) if err != nil { if trace.IsNotFound(err) { continue @@ -536,7 +537,7 @@ type NodeDelete struct { // Run grabs the host UUID of an agent from a disk and deletes the node with that name. func (n *NodeDelete) Run(ctx context.Context, presence Presence, cluster *clusters.Cluster) error { - hostUUID, err := utils.ReadHostUUID(getAgentDataDir(n.cfg.AgentsDir, cluster.ProfileName)) + hostUUID, err := hostid.ReadFile(getAgentDataDir(n.cfg.AgentsDir, cluster.ProfileName)) if trace.IsNotFound(err) { return nil } @@ -585,7 +586,7 @@ type NodeName struct { // Get returns the host UUID of the agent from a disk. func (n *NodeName) Get(cluster *clusters.Cluster) (string, error) { - hostUUID, err := utils.ReadHostUUID(getAgentDataDir(n.cfg.AgentsDir, cluster.ProfileName)) + hostUUID, err := hostid.ReadFile(getAgentDataDir(n.cfg.AgentsDir, cluster.ProfileName)) return hostUUID, trace.Wrap(err) } diff --git a/lib/teleterm/services/connectmycomputer/connectmycomputer_test.go b/lib/teleterm/services/connectmycomputer/connectmycomputer_test.go index 9a0af0b749edf..e7b453b94b2bc 100644 --- a/lib/teleterm/services/connectmycomputer/connectmycomputer_test.go +++ b/lib/teleterm/services/connectmycomputer/connectmycomputer_test.go @@ -35,7 +35,7 @@ import ( "github.com/gravitational/teleport/lib/defaults" "github.com/gravitational/teleport/lib/teleterm/api/uri" "github.com/gravitational/teleport/lib/teleterm/clusters" - "github.com/gravitational/teleport/lib/utils" + "github.com/gravitational/teleport/lib/utils/hostid" ) func TestRoleSetupRun_WithNonLocalUser(t *testing.T) { @@ -472,7 +472,7 @@ func mustMakeHostUUIDFile(t *testing.T, agentsDir string, profileName string) st err = os.MkdirAll(dataDir, agentsDirStat.Mode()) require.NoError(t, err) - hostUUID, err := utils.ReadOrMakeHostUUID(dataDir) + hostUUID, err := hostid.ReadOrCreateFile(dataDir) require.NoError(t, err) return hostUUID diff --git a/lib/utils/hostid/hostid.go b/lib/utils/hostid/hostid.go new file mode 100644 index 0000000000000..094e4cf9547ae --- /dev/null +++ b/lib/utils/hostid/hostid.go @@ -0,0 +1,61 @@ +// Teleport +// Copyright (C) 2024 Gravitational, Inc. 
+// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see <https://www.gnu.org/licenses/>. + +package hostid + +import ( + "errors" + "io/fs" + "path/filepath" + "strings" + + "github.com/gravitational/trace" + + "github.com/gravitational/teleport/lib/utils" +) + +const ( + // FileName is the file name where the host UUID file is stored + FileName = "host_uuid" +) + +// GetPath returns the path to the host UUID file given the data directory. +func GetPath(dataDir string) string { + return filepath.Join(dataDir, FileName) +} + +// ExistsLocally checks if dataDir/host_uuid file exists in local storage. +func ExistsLocally(dataDir string) bool { + _, err := ReadFile(dataDir) + return err == nil +} + +// ReadFile reads host UUID from the file in the data dir +func ReadFile(dataDir string) (string, error) { + out, err := utils.ReadPath(GetPath(dataDir)) + if err != nil { + if errors.Is(err, fs.ErrPermission) { + //do not convert to system error as this loses the ability to compare that it is a permission error + return "", trace.Wrap(err) + } + return "", trace.ConvertSystemError(err) + } + id := strings.TrimSpace(string(out)) + if id == "" { + return "", trace.NotFound("host uuid is empty") + } + return id, nil +} diff --git a/lib/utils/hostid/hostid_test.go b/lib/utils/hostid/hostid_test.go new file mode 100644 index 0000000000000..2ea22c4e71e7f --- /dev/null +++ b/lib/utils/hostid/hostid_test.go @@ -0,0 +1,116 @@ +//go:build !windows + +// Teleport +// Copyright (C) 2024 Gravitational, Inc. +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see <https://www.gnu.org/licenses/>.
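A note on the error contract of ReadFile above: permission failures are returned unconverted so callers can still match them with errors.Is, other I/O failures are converted to trace system errors, and a missing or empty file surfaces as trace.NotFound. A hedged sketch of how a caller might branch on those cases (the data directory path is illustrative):

	package main

	import (
		"errors"
		"fmt"
		"io/fs"

		"github.com/gravitational/trace"

		"github.com/gravitational/teleport/lib/utils/hostid"
	)

	func main() {
		id, err := hostid.ReadFile("/var/lib/teleport")
		switch {
		case errors.Is(err, fs.ErrPermission):
			fmt.Println("host_uuid exists but is not readable by this user")
		case trace.IsNotFound(err):
			fmt.Println("host_uuid is missing or empty")
		case err != nil:
			fmt.Println("unexpected error:", err)
		default:
			fmt.Println("host UUID:", id)
		}
	}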
+ +package hostid_test + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" + + "github.com/gravitational/teleport/lib/utils" + "github.com/gravitational/teleport/lib/utils/hostid" +) + +func TestMain(m *testing.M) { + utils.InitLoggerForTests() + os.Exit(m.Run()) +} + +func TestReadOrCreate(t *testing.T) { + t.Parallel() + + dir := t.TempDir() + + var wg errgroup.Group + concurrency := 10 + ids := make([]string, concurrency) + barrier := make(chan struct{}) + + for i := 0; i < concurrency; i++ { + i := i + wg.Go(func() error { + <-barrier + id, err := hostid.ReadOrCreateFile(dir) + ids[i] = id + return err + }) + } + + close(barrier) + + require.NoError(t, wg.Wait()) + for _, id := range ids { + assert.Equal(t, ids[0], id) + } +} + +func TestIdempotence(t *testing.T) { + t.Parallel() + + // call twice, get same result + dir := t.TempDir() + id, err := hostid.ReadOrCreateFile(dir) + require.Len(t, id, 36) + require.NoError(t, err) + uuidCopy, err := hostid.ReadOrCreateFile(dir) + require.NoError(t, err) + require.Equal(t, id, uuidCopy) +} + +func TestBadLocation(t *testing.T) { + t.Parallel() + + // call with a read-only dir, make sure to get an error + id, err := hostid.ReadOrCreateFile("/bad-location") + require.Empty(t, id) + require.Error(t, err) + require.Regexp(t, "^.*no such file or directory.*$", err.Error()) +} + +func TestIgnoreWhitespace(t *testing.T) { + t.Parallel() + + // newlines are getting ignored + dir := t.TempDir() + id := fmt.Sprintf("%s\n", uuid.NewString()) + err := os.WriteFile(filepath.Join(dir, hostid.FileName), []byte(id), 0666) + require.NoError(t, err) + out, err := hostid.ReadFile(dir) + require.NoError(t, err) + require.Equal(t, strings.TrimSpace(id), out) +} + +func TestRegenerateEmpty(t *testing.T) { + t.Parallel() + + // empty UUID in file is regenerated + dir := t.TempDir() + err := os.WriteFile(filepath.Join(dir, hostid.FileName), nil, 0666) + require.NoError(t, err) + out, err := hostid.ReadOrCreateFile(dir) + require.NoError(t, err) + require.Len(t, out, 36) +} diff --git a/lib/utils/hostid/hostid_unix.go b/lib/utils/hostid/hostid_unix.go new file mode 100644 index 0000000000000..b5334e641c232 --- /dev/null +++ b/lib/utils/hostid/hostid_unix.go @@ -0,0 +1,105 @@ +//go:build !windows + +// Teleport +// Copyright (C) 2024 Gravitational, Inc. +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see <https://www.gnu.org/licenses/>.
+ +package hostid + +import ( + "errors" + "io/fs" + "time" + + "github.com/google/renameio/v2" + "github.com/google/uuid" + "github.com/gravitational/trace" + + "github.com/gravitational/teleport/lib/utils" +) + +// WriteFile writes host UUID into a file +func WriteFile(dataDir string, id string) error { + err := renameio.WriteFile(GetPath(dataDir), []byte(id), 0o400) + if err != nil { + if errors.Is(err, fs.ErrPermission) { + //do not convert to system error as this loses the ability to compare that it is a permission error + return trace.Wrap(err) + } + return trace.ConvertSystemError(err) + } + return nil +} + +// ReadOrCreateFile looks for a hostid file in the data dir. If present, +// returns the UUID from it, otherwise generates one +func ReadOrCreateFile(dataDir string) (string, error) { + hostUUIDFileLock := GetPath(dataDir) + ".lock" + const iterationLimit = 3 + + for i := 0; i < iterationLimit; i++ { + if read, err := ReadFile(dataDir); err == nil { + return read, nil + } else if !trace.IsNotFound(err) { + return "", trace.Wrap(err) + } + + // Checking error instead of the usual uuid.New() in case uuid generation + // fails due to not enough randomness. It's been known to happen when + // Teleport starts very early in the node initialization cycle and /dev/urandom + // isn't ready yet. + rawID, err := uuid.NewRandom() + if err != nil { + return "", trace.BadParameter("" + + "Teleport failed to generate host UUID. " + + "This may happen if randomness source is not fully initialized when the node is starting up. " + + "Please try restarting Teleport again.") + } + + writeFile := func(potentialID string) (string, error) { + unlock, err := utils.FSTryWriteLock(hostUUIDFileLock) + if err != nil { + return "", trace.Wrap(err) + } + defer unlock() + + if read, err := ReadFile(dataDir); err == nil { + return read, nil + } else if !trace.IsNotFound(err) { + return "", trace.Wrap(err) + } + + if err := WriteFile(dataDir, potentialID); err != nil { + return "", trace.Wrap(err) + } + + return potentialID, nil + } + + id, err := writeFile(rawID.String()) + if err != nil { + if errors.Is(err, utils.ErrUnsuccessfulLockTry) { + time.Sleep(100 * time.Millisecond) + continue + } + + return "", trace.Wrap(err) + } + + return id, nil + } + + return "", trace.LimitExceeded("failed to obtain host uuid") +} diff --git a/lib/utils/hostid/hostid_windows.go b/lib/utils/hostid/hostid_windows.go new file mode 100644 index 0000000000000..ab2a5a55e56d7 --- /dev/null +++ b/lib/utils/hostid/hostid_windows.go @@ -0,0 +1,30 @@ +// Teleport +// Copyright (C) 2024 Gravitational, Inc. +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see <https://www.gnu.org/licenses/>.
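For orientation, a sketch of how the call sites updated elsewhere in this diff (sshserver.go, connectmycomputer.go, tctl) consume the new package; the data directory below is illustrative. On a fresh directory, ReadOrCreateFile generates a UUID, persists it atomically via renameio, and serializes concurrent starters through the host_uuid.lock file:

	package main

	import (
		"fmt"

		"github.com/gravitational/teleport/lib/utils/hostid"
	)

	func main() {
		dataDir := "/var/lib/teleport" // illustrative path
		// Returns the UUID stored in dataDir/host_uuid, creating and
		// persisting a new one if the file does not exist yet.
		id, err := hostid.ReadOrCreateFile(dataDir)
		if err != nil {
			panic(err)
		}
		fmt.Println("host UUID:", id, "stored at", hostid.GetPath(dataDir))
	}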
+ +package hostid + +import "github.com/gravitational/trace" + +// WriteFile writes host UUID into a file +func WriteFile(dataDir string, id string) error { + return trace.NotImplemented("host id writing is not supported on windows") +} + +// ReadOrCreateFile looks for a hostid file in the data dir. If present, +// returns the UUID from it, otherwise generates one +func ReadOrCreateFile(dataDir string) (string, error) { + return "", trace.NotImplemented("host id writing is not supported on windows") +} diff --git a/lib/utils/utils.go b/lib/utils/utils.go index b1931e2ae8cf4..5da5b39d05685 100644 --- a/lib/utils/utils.go +++ b/lib/utils/utils.go @@ -37,7 +37,6 @@ import ( "time" "unicode" - "github.com/google/uuid" "github.com/gravitational/trace" log "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/util/validation" @@ -468,75 +467,6 @@ func GetFreeTCPPorts(n int, offset ...int) (PortList, error) { return PortList{ports: list}, nil } -// GetHostUUIDPath returns the path to the host UUID file given the data directory. -func GetHostUUIDPath(dataDir string) string { - return filepath.Join(dataDir, HostUUIDFile) -} - -// HostUUIDExistsLocally checks if dataDir/host_uuid file exists in local storage. -func HostUUIDExistsLocally(dataDir string) bool { - _, err := ReadHostUUID(dataDir) - return err == nil -} - -// ReadHostUUID reads host UUID from the file in the data dir -func ReadHostUUID(dataDir string) (string, error) { - out, err := ReadPath(GetHostUUIDPath(dataDir)) - if err != nil { - if errors.Is(err, fs.ErrPermission) { - //do not convert to system error as this loses the ability to compare that it is a permission error - return "", err - } - return "", trace.ConvertSystemError(err) - } - id := strings.TrimSpace(string(out)) - if id == "" { - return "", trace.NotFound("host uuid is empty") - } - return id, nil -} - -// WriteHostUUID writes host UUID into a file -func WriteHostUUID(dataDir string, id string) error { - err := os.WriteFile(GetHostUUIDPath(dataDir), []byte(id), os.ModeExclusive|0400) - if err != nil { - if errors.Is(err, fs.ErrPermission) { - //do not convert to system error as this loses the ability to compare that it is a permission error - return err - } - return trace.ConvertSystemError(err) - } - return nil -} - -// ReadOrMakeHostUUID looks for a hostid file in the data dir. If present, -// returns the UUID from it, otherwise generates one -func ReadOrMakeHostUUID(dataDir string) (string, error) { - id, err := ReadHostUUID(dataDir) - if err == nil { - return id, nil - } - if !trace.IsNotFound(err) { - return "", trace.Wrap(err) - } - // Checking error instead of the usual uuid.New() in case uuid generation - // fails due to not enough randomness. It's been known to happen happen when - // Teleport starts very early in the node initialization cycle and /dev/urandom - // isn't ready yet. - rawID, err := uuid.NewRandom() - if err != nil { - return "", trace.BadParameter("" + - "Teleport failed to generate host UUID. " + - "This may happen if randomness source is not fully initialized when the node is starting up. " + - "Please try restarting Teleport again.") - } - id = rawID.String() - if err = WriteHostUUID(dataDir, id); err != nil { - return "", trace.Wrap(err) - } - return id, nil -} - // StringSliceSubset returns true if b is a subset of a. 
func StringSliceSubset(a []string, b []string) error { aset := make(map[string]bool) @@ -712,8 +642,6 @@ const ( // CertExtensionAuthority specifies teleport authority's name // that signed this domain CertExtensionAuthority = "x-teleport-authority" - // HostUUIDFile is the file name where the host UUID file is stored - HostUUIDFile = "host_uuid" // CertTeleportClusterName is a name of the teleport cluster CertTeleportClusterName = "x-teleport-cluster-name" // CertTeleportUserCertificate is the certificate of the authenticated in user. diff --git a/lib/utils/utils_test.go b/lib/utils/utils_test.go index 42ca172f35b78..e1625915bb204 100644 --- a/lib/utils/utils_test.go +++ b/lib/utils/utils_test.go @@ -20,14 +20,12 @@ package utils import ( "bytes" - "fmt" "os" "path/filepath" "strings" "testing" "time" - "github.com/google/uuid" "github.com/gravitational/trace" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -41,54 +39,6 @@ func TestMain(m *testing.M) { os.Exit(m.Run()) } -func TestHostUUIDIdempotent(t *testing.T) { - t.Parallel() - - // call twice, get same result - dir := t.TempDir() - id, err := ReadOrMakeHostUUID(dir) - require.Len(t, id, 36) - require.NoError(t, err) - uuidCopy, err := ReadOrMakeHostUUID(dir) - require.NoError(t, err) - require.Equal(t, id, uuidCopy) -} - -func TestHostUUIDBadLocation(t *testing.T) { - t.Parallel() - - // call with a read-only dir, make sure to get an error - id, err := ReadOrMakeHostUUID("/bad-location") - require.Empty(t, id) - require.Error(t, err) - require.Regexp(t, "^.*no such file or directory.*$", err.Error()) -} - -func TestHostUUIDIgnoreWhitespace(t *testing.T) { - t.Parallel() - - // newlines are getting ignored - dir := t.TempDir() - id := fmt.Sprintf("%s\n", uuid.NewString()) - err := os.WriteFile(filepath.Join(dir, HostUUIDFile), []byte(id), 0666) - require.NoError(t, err) - out, err := ReadHostUUID(dir) - require.NoError(t, err) - require.Equal(t, strings.TrimSpace(id), out) -} - -func TestHostUUIDRegenerateEmpty(t *testing.T) { - t.Parallel() - - // empty UUID in file is regenerated - dir := t.TempDir() - err := os.WriteFile(filepath.Join(dir, HostUUIDFile), nil, 0666) - require.NoError(t, err) - out, err := ReadOrMakeHostUUID(dir) - require.NoError(t, err) - require.Len(t, out, 36) -} - func TestSelfSignedCert(t *testing.T) { t.Parallel() diff --git a/tool/tctl/common/admin_action_test.go b/tool/tctl/common/admin_action_test.go index 7e9ff52ceae3a..b910cf239b5ab 100644 --- a/tool/tctl/common/admin_action_test.go +++ b/tool/tctl/common/admin_action_test.go @@ -56,6 +56,7 @@ import ( "github.com/gravitational/teleport/lib/services" "github.com/gravitational/teleport/lib/tlsca" "github.com/gravitational/teleport/lib/utils" + "github.com/gravitational/teleport/lib/utils/hostid" tctl "github.com/gravitational/teleport/tool/tctl/common" testserver "github.com/gravitational/teleport/tool/teleport/testenv" tsh "github.com/gravitational/teleport/tool/tsh/common" @@ -1076,7 +1077,7 @@ func newAdminActionTestSuite(t *testing.T) *adminActionTestSuite { }) require.NoError(t, err) - hostUUID, err := utils.ReadHostUUID(process.Config.DataDir) + hostUUID, err := hostid.ReadFile(process.Config.DataDir) require.NoError(t, err) localAdmin, err := storage.ReadLocalIdentity( filepath.Join(process.Config.DataDir, teleport.ComponentProcess), diff --git a/tool/tctl/common/edit_command.go b/tool/tctl/common/edit_command.go index 9317db74ce419..5c3b2f9efbdf4 100644 --- a/tool/tctl/common/edit_command.go +++ 
b/tool/tctl/common/edit_command.go @@ -44,10 +44,11 @@ import ( // EditCommand implements the `tctl edit` command for modifying // Teleport resources. type EditCommand struct { - app *kingpin.Application - cmd *kingpin.CmdClause - config *servicecfg.Config - ref services.Ref + app *kingpin.Application + cmd *kingpin.CmdClause + config *servicecfg.Config + ref services.Ref + confirm bool // Editor is used by tests to inject the editing mechanism // so that different scenarios can be asserted. @@ -61,9 +62,10 @@ func (e *EditCommand) Initialize(app *kingpin.Application, config *servicecfg.Co e.cmd.Arg("resource type/resource name", `Resource to update Type of a resource [for example: rc] Resource name to update - + Example: $ tctl edit rc/remote`).SetValue(&e.ref) + e.cmd.Flag("confirm", "Confirm an unsafe or temporary resource update").Hidden().BoolVar(&e.confirm) } func (e *EditCommand) TryRun(ctx context.Context, cmd string, client *authclient.Client) (bool, error) { @@ -115,6 +117,7 @@ func (e *EditCommand) editResource(ctx context.Context, client *authclient.Clien filename: f.Name(), force: true, withSecrets: true, + confirm: e.confirm, } rc.Initialize(e.app, e.config) diff --git a/tool/tctl/common/resource_command.go b/tool/tctl/common/resource_command.go index 4d4baba819dea..180eff4abaa1d 100644 --- a/tool/tctl/common/resource_command.go +++ b/tool/tctl/common/resource_command.go @@ -715,6 +715,14 @@ func (rc *ResourceCommand) updateAuthPreference(ctx context.Context, client *aut return trace.Wrap(err) } + storedAuthPref, err := client.GetAuthPreference(ctx) + if err != nil { + return trace.Wrap(err) + } + if err := checkUpdateResourceWithOrigin(storedAuthPref, "cluster auth preference", rc.confirm); err != nil { + return trace.Wrap(err) + } + if _, err := client.UpdateAuthPreference(ctx, newAuthPref); err != nil { return trace.Wrap(err) } @@ -751,6 +759,14 @@ func (rc *ResourceCommand) updateClusterNetworkingConfig(ctx context.Context, cl return trace.Wrap(err) } + storedNetConfig, err := client.GetClusterNetworkingConfig(ctx) + if err != nil { + return trace.Wrap(err) + } + if err := checkUpdateResourceWithOrigin(storedNetConfig, "cluster networking configuration", rc.confirm); err != nil { + return trace.Wrap(err) + } + if _, err := client.UpdateClusterNetworkingConfig(ctx, newNetConfig); err != nil { return trace.Wrap(err) } @@ -809,6 +825,14 @@ func (rc *ResourceCommand) updateSessionRecordingConfig(ctx context.Context, cli return trace.Wrap(err) } + storedRecConfig, err := client.GetSessionRecordingConfig(ctx) + if err != nil { + return trace.Wrap(err) + } + if err := checkUpdateResourceWithOrigin(storedRecConfig, "session recording configuration", rc.confirm); err != nil { + return trace.Wrap(err) + } + if _, err := client.UpdateSessionRecordingConfig(ctx, newRecConfig); err != nil { return trace.Wrap(err) } @@ -3165,10 +3189,15 @@ func checkCreateResourceWithOrigin(storedRes types.ResourceWithOrigin, resDesc s if exists := (storedRes.Origin() != types.OriginDefaults); exists && !force { return trace.AlreadyExists("non-default %s already exists", resDesc) } - if managedByStatic := (storedRes.Origin() == types.OriginConfigFile); managedByStatic && !confirm { + return checkUpdateResourceWithOrigin(storedRes, resDesc, confirm) +} + +func checkUpdateResourceWithOrigin(storedRes types.ResourceWithOrigin, resDesc string, confirm bool) error { + managedByStatic := storedRes.Origin() == types.OriginConfigFile + if managedByStatic && !confirm { return trace.BadParameter(`The %s resource 
is managed by static configuration. We recommend removing configuration from teleport.yaml, restarting the servers and trying this command again. -If you would still like to proceed, re-run the command with both --force and --confirm flags.`, resDesc) +If you would still like to proceed, re-run the command with the --confirm flag.`, resDesc) } return nil } diff --git a/tool/tctl/common/tctl.go b/tool/tctl/common/tctl.go index b4d0883c8464b..6ee12979b73bc 100644 --- a/tool/tctl/common/tctl.go +++ b/tool/tctl/common/tctl.go @@ -52,6 +52,7 @@ import ( "github.com/gravitational/teleport/lib/reversetunnelclient" "github.com/gravitational/teleport/lib/service/servicecfg" "github.com/gravitational/teleport/lib/utils" + "github.com/gravitational/teleport/lib/utils/hostid" "github.com/gravitational/teleport/tool/common" ) @@ -380,16 +381,16 @@ func ApplyConfig(ccf *GlobalCLIFlags, cfg *servicecfg.Config) (*authclient.Confi authConfig := new(authclient.Config) // read the host UUID only in case the identity was not provided, // because it will be used for reading local auth server identity - cfg.HostUUID, err = utils.ReadHostUUID(cfg.DataDir) + cfg.HostUUID, err = hostid.ReadFile(cfg.DataDir) if err != nil { if errors.Is(err, fs.ErrNotExist) { return nil, trace.Wrap(err, "Could not load Teleport host UUID file at %s. "+ "Please make sure that a Teleport Auth Service instance is running on this host prior to using tctl or provide credentials by logging in with tsh first.", - filepath.Join(cfg.DataDir, utils.HostUUIDFile)) + filepath.Join(cfg.DataDir, hostid.FileName)) } else if errors.Is(err, fs.ErrPermission) { return nil, trace.Wrap(err, "Teleport does not have permission to read Teleport host UUID file at %s. "+ "Ensure that you are running as a user with appropriate permissions or provide credentials by logging in with tsh first.", - filepath.Join(cfg.DataDir, utils.HostUUIDFile)) + filepath.Join(cfg.DataDir, hostid.FileName)) } return nil, trace.Wrap(err) } diff --git a/tool/teleport/testenv/test_server.go b/tool/teleport/testenv/test_server.go index 759abe0d56a4e..234ad8296792b 100644 --- a/tool/teleport/testenv/test_server.go +++ b/tool/teleport/testenv/test_server.go @@ -62,6 +62,7 @@ import ( "github.com/gravitational/teleport/lib/sshutils" "github.com/gravitational/teleport/lib/tlsca" "github.com/gravitational/teleport/lib/utils" + "github.com/gravitational/teleport/lib/utils/hostid" "github.com/gravitational/teleport/tool/teleport/common" ) @@ -695,7 +696,7 @@ func MakeDefaultAuthClient(t *testing.T, process *service.TeleportProcess) *auth t.Helper() cfg := process.Config - hostUUID, err := utils.ReadHostUUID(process.Config.DataDir) + hostUUID, err := hostid.ReadFile(process.Config.DataDir) require.NoError(t, err) identity, err := storage.ReadLocalIdentity( diff --git a/tool/tsh/common/tsh.go b/tool/tsh/common/tsh.go index 019b7354176a9..167f3a5d07cbb 100644 --- a/tool/tsh/common/tsh.go +++ b/tool/tsh/common/tsh.go @@ -3758,6 +3758,11 @@ func onSSH(cf *CLIConf) error { tc.AllowHeadless = true + // Support calling `tsh ssh -- <command>` (with a double dash before the command) + if len(cf.RemoteCommand) > 0 && strings.TrimSpace(cf.RemoteCommand[0]) == "--" { + cf.RemoteCommand = cf.RemoteCommand[1:] + } + tc.Stdin = os.Stdin err = retryWithAccessRequest(cf, tc, func() error { sshFunc := func() error { diff --git a/tool/tsh/common/tsh_test.go b/tool/tsh/common/tsh_test.go index 42708a3f72487..a6dc3c55bc14f 100644 --- a/tool/tsh/common/tsh_test.go +++ b/tool/tsh/common/tsh_test.go @@ -2303,6 +2303,183
diff --git a/tool/tsh/common/tsh_test.go b/tool/tsh/common/tsh_test.go
index 42708a3f72487..a6dc3c55bc14f 100644
--- a/tool/tsh/common/tsh_test.go
+++ b/tool/tsh/common/tsh_test.go
@@ -2303,6 +2303,183 @@ func TestAccessRequestOnLeaf(t *testing.T) {
 	require.NoError(t, err)
 }
 
+// TestSSHCommands tests that a user can access a single SSH node and run commands.
+func TestSSHCommands(t *testing.T) {
+	modules.SetTestModules(t, &modules.TestModules{TestBuildType: modules.BuildEnterprise})
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	accessRoleName := "access"
+	sshHostname := "test-ssh-server"
+
+	accessUser, err := types.NewUser(accessRoleName)
+	require.NoError(t, err)
+	accessUser.SetRoles([]string{accessRoleName})
+
+	user, err := user.Current()
+	require.NoError(t, err)
+	accessUser.SetLogins([]string{user.Username})
+
+	traits := map[string][]string{
+		constants.TraitLogins: {user.Username},
+	}
+	accessUser.SetTraits(traits)
+
+	connector := mockConnector(t)
+	rootServerOpts := []testserver.TestServerOptFunc{
+		testserver.WithBootstrap(connector, accessUser),
+		testserver.WithHostname(sshHostname),
+		testserver.WithClusterName(t, "root"),
+		testserver.WithSSHLabel(accessRoleName, "true"),
+		testserver.WithSSHPublicAddrs("127.0.0.1:0"),
+		testserver.WithConfig(func(cfg *servicecfg.Config) {
+			cfg.SSH.Enabled = true
+			cfg.SSH.PublicAddrs = []utils.NetAddr{cfg.SSH.Addr}
+			cfg.SSH.DisableCreateHostUser = true
+		}),
+	}
+	rootServer := testserver.MakeTestServer(t, rootServerOpts...)
+
+	rootProxyAddr, err := rootServer.ProxyWebAddr()
+	require.NoError(t, err)
+
+	require.EventuallyWithT(t, func(t *assert.CollectT) {
+		rootNodes, err := rootServer.GetAuthServer().GetNodes(ctx, apidefaults.Namespace)
+		if !assert.NoError(t, err) || !assert.Len(t, rootNodes, 1) {
+			return
+		}
+	}, 10*time.Second, 100*time.Millisecond)
+
+	tmpHomePath := t.TempDir()
+	rootAuth := rootServer.GetAuthServer()
+
+	err = Run(ctx, []string{
+		"login",
+		"--insecure",
+		"--proxy", rootProxyAddr.String(),
+		"--user", user.Username,
+	}, setHomePath(tmpHomePath), setMockSSOLogin(rootAuth, accessUser, connector.GetName()))
+	require.NoError(t, err)
+
+	tests := []struct {
+		name      string
+		args      []string
+		expected  string
+		shouldErr bool
+	}{
+		{
+			// Test that a simple echo works.
+			name:     "ssh simple command",
+			expected: "this is a test message",
+			args: []string{
+				fmt.Sprintf("%s@%s", user.Username, sshHostname),
+				"echo",
+				"this is a test message",
+			},
+			shouldErr: false,
+		},
+		{
+			// Test that commands can be prefixed with a double dash.
+			name:     "ssh command with double dash",
+			expected: "this is a test message",
+			args: []string{
+				fmt.Sprintf("%s@%s", user.Username, sshHostname),
+				"--",
+				"echo",
+				"this is a test message",
+			},
+			shouldErr: false,
+		},
+		{
+			// Test that a double dash is not removed from the middle of a command.
+			name:     "ssh command with double dash in the middle",
+			expected: "-- this is a test message",
+			args: []string{
+				fmt.Sprintf("%s@%s", user.Username, sshHostname),
+				"echo",
+				"--",
+				"this is a test message",
+			},
+			shouldErr: false,
+		},
+		{
+			// Test that quoted commands work (e.g. `tsh ssh 'echo test'`)
+			name:     "ssh command literal",
+			expected: "this is a test message",
+			args: []string{
+				fmt.Sprintf("%s@%s", user.Username, sshHostname),
+				"echo this is a test message",
+			},
+			shouldErr: false,
+		},
+		{
+			// Test that a double dash is passed as-is in a quoted command (which should fail).
+			name:     "ssh command literal with double dash err",
+			expected: "",
+			args: []string{
+				fmt.Sprintf("%s@%s", user.Username, sshHostname),
+				"-- echo this is a test message",
+			},
+			shouldErr: true,
+		},
+		{
+			// Test that a double dash is not removed from the middle of a quoted command.
+			name:     "ssh command literal with double dash in the middle",
+			expected: "-- this is a test message",
+			args: []string{
+				fmt.Sprintf("%s@%s", user.Username, sshHostname),
+				"echo", "-- this is a test message",
+			},
+			shouldErr: false,
+		},
+		{
+			// Test tsh ssh -- hostname command
+			name:     "delimiter before host and command",
+			expected: "this is a test message",
+			args: []string{
+				"--", sshHostname, "echo", "this is a test message",
+			},
+			shouldErr: false,
+		},
+	}
+
+	for _, test := range tests {
+		test := test
+		ctx := context.Background()
+		t.Run(test.name, func(t *testing.T) {
+			t.Parallel()
+
+			stdout := &output{buf: bytes.Buffer{}}
+			stderr := &output{buf: bytes.Buffer{}}
+			args := append(
+				[]string{
+					"ssh",
+					"--insecure",
+					"--proxy", rootProxyAddr.String(),
+				},
+				test.args...,
+			)
+
+			err := Run(ctx, args, setHomePath(tmpHomePath),
+				func(conf *CLIConf) error {
+					conf.overrideStdin = &bytes.Buffer{}
+					conf.OverrideStdout = stdout
+					conf.overrideStderr = stderr
+					return nil
+				},
+			)
+
+			if test.shouldErr {
+				require.Error(t, err)
+			} else {
+				require.NoError(t, err)
+				require.Equal(t, test.expected, strings.TrimSpace(stdout.String()))
+				require.Empty(t, stderr.String())
+			}
+		})
+	}
+}
+
 // tryCreateTrustedCluster performs several attempts to create a trusted cluster,
 // retries on connection problems and access denied errors to let caches
 // propagate and services to start
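A note on the loop in TestSSHCommands above: `test := test` rebinds the range variable before `t.Run(..., t.Parallel())`, because on Go versions before 1.22 every parallel subtest would otherwise observe the final table entry. A condensed sketch of that pattern with placeholder table data (save as a `_test.go` file to run):

```go
package main

import "testing"

// TestPattern shows the parallel table-driven idiom used above;
// the table contents here are placeholders, not Teleport code.
func TestPattern(t *testing.T) {
	tests := []struct {
		name string
		in   string
		want string
	}{
		{name: "a", in: "x", want: "x"},
		{name: "b", in: "y", want: "y"},
	}
	for _, test := range tests {
		test := test // capture: pre-Go 1.22, parallel subtests would otherwise share one variable
		t.Run(test.name, func(t *testing.T) {
			t.Parallel()
			if test.in != test.want {
				t.Errorf("got %q, want %q", test.in, test.want)
			}
		})
	}
}
```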
+ name: "ssh command literal with double dash err", + expected: "", + args: []string{ + fmt.Sprintf("%s@%s", user.Username, sshHostname), + "-- echo this is a test message", + }, + shouldErr: true, + }, + { + // Test that a double dash is not removed from the middle of a quoted command. + name: "ssh command literal with double dash in the middle", + expected: "-- this is a test message", + args: []string{ + fmt.Sprintf("%s@%s", user.Username, sshHostname), + "echo", "-- this is a test message", + }, + shouldErr: false, + }, + { + // Test tsh ssh -- hostname command + name: "delimiter before host and command", + expected: "this is a test message", + args: []string{ + "--", sshHostname, "echo", "this is a test message", + }, + shouldErr: false, + }, + } + + for _, test := range tests { + test := test + ctx := context.Background() + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + stdout := &output{buf: bytes.Buffer{}} + stderr := &output{buf: bytes.Buffer{}} + args := append( + []string{ + "ssh", + "--insecure", + "--proxy", rootProxyAddr.String(), + }, + test.args..., + ) + + err := Run(ctx, args, setHomePath(tmpHomePath), + func(conf *CLIConf) error { + conf.overrideStdin = &bytes.Buffer{} + conf.OverrideStdout = stdout + conf.overrideStderr = stderr + return nil + }, + ) + + if test.shouldErr { + require.Error(t, err) + } else { + require.NoError(t, err) + require.Equal(t, test.expected, strings.TrimSpace(stdout.String())) + require.Empty(t, stderr.String()) + } + }) + } +} + // tryCreateTrustedCluster performs several attempts to create a trusted cluster, // retries on connection problems and access denied errors to let caches // propagate and services to start diff --git a/web/packages/design/src/DataTable/Table.tsx b/web/packages/design/src/DataTable/Table.tsx index 47cc0348eb861..fd4b76ca2378a 100644 --- a/web/packages/design/src/DataTable/Table.tsx +++ b/web/packages/design/src/DataTable/Table.tsx @@ -16,7 +16,7 @@ * along with this program. If not, see . */ -import React from 'react'; +import React, { PropsWithChildren } from 'react'; import { Box, Flex, Indicator, Text } from 'design'; import * as Icons from 'design/Icon'; @@ -110,6 +110,22 @@ export function Table({ return ; } data.map((item, rowIdx) => { + const TableRow: React.FC = ({ children }) => ( + row?.onClick?.(item)} + style={row?.getStyle?.(item)} + > + {children} + + ); + + const customRow = row?.customRow?.(item); + if (customRow) { + rows.push({customRow}); + return; + } + const cells = columns.flatMap((column, columnIdx) => { if (column.isNonRender) { return []; // does not include this column. @@ -127,15 +143,7 @@ export function Table({ ); }); - rows.push( - row?.onClick?.(item)} - style={row?.getStyle?.(item)} - > - {cells} - - ); + rows.push({cells}); }); if (rows.length) { diff --git a/web/packages/design/src/DataTable/types.ts b/web/packages/design/src/DataTable/types.ts index 53a0abe644e60..afdaf940c2212 100644 --- a/web/packages/design/src/DataTable/types.ts +++ b/web/packages/design/src/DataTable/types.ts @@ -79,6 +79,14 @@ export type TableProps = { * conditionally style a row (eg: cursor: pointer, disabled) */ getStyle?(row: T): React.CSSProperties; + /** + * conditionally render a custom row + * use case: by default all columns are represented by cells + * but certain rows you need all the columns to be merged + * into one cell to render other related elements like a + * dropdown selector. 
diff --git a/web/packages/design/src/Link/Link.jsx b/web/packages/design/src/Link/Link.jsx
index 957be80a5f7be..260c31f85094b 100644
--- a/web/packages/design/src/Link/Link.jsx
+++ b/web/packages/design/src/Link/Link.jsx
@@ -31,7 +31,6 @@ const StyledButtonLink = styled.a.attrs({
   rel: 'noreferrer',
 })`
   color: ${({ theme }) => theme.colors.buttons.link.default};
-  font-weight: normal;
   background: none;
   text-decoration: underline;
   text-transform: none;
diff --git a/web/packages/design/src/Onboard/__snapshots__/WelcomeWrapper.story.test.tsx.snap b/web/packages/design/src/Onboard/__snapshots__/WelcomeWrapper.story.test.tsx.snap
index 7a40b24bb5ff9..71b0808b0f065 100644
--- a/web/packages/design/src/Onboard/__snapshots__/WelcomeWrapper.story.test.tsx.snap
+++ b/web/packages/design/src/Onboard/__snapshots__/WelcomeWrapper.story.test.tsx.snap
@@ -35,7 +35,6 @@ exports[`wrapper 1`] = `
 .c13 {
   color: #009EFF;
-  font-weight: normal;
   background: none;
   text-decoration: underline;
   text-transform: none;
diff --git a/web/packages/shared/components/AccessRequests/NewRequest/CheckableOption.tsx b/web/packages/shared/components/AccessRequests/NewRequest/CheckableOption.tsx
new file mode 100644
index 0000000000000..f10b940050628
--- /dev/null
+++ b/web/packages/shared/components/AccessRequests/NewRequest/CheckableOption.tsx
@@ -0,0 +1,48 @@
+/**
+ * Teleport
+ * Copyright (C) 2024 Gravitational, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+import React from 'react';
+import { Flex, Text } from 'design';
+import { components, OptionProps } from 'react-select';
+
+import { Option as BaseOption } from 'shared/components/Select';
+
+export type Option = BaseOption & {
+  isAdded?: boolean;
+  kind: 'app' | 'user_group' | 'namespace';
+};
+
+export const CheckableOptionComponent = (
+  props: OptionProps<Option>