From 6cc282bc7c6721ca97d8ae3be07e5d3e1384c912 Mon Sep 17 00:00:00 2001
From: miampf
Date: Fri, 29 Dec 2023 14:20:55 +0100
Subject: [PATCH 01/54] rewrote some logger helper functions

rewrote interceptors + middleware logging
---
 internal/logger/cmdline.go | 15 +++++++--------
 internal/logger/log.go     | 35 +++++++++++++++++++++--------------
 2 files changed, 28 insertions(+), 22 deletions(-)

diff --git a/internal/logger/cmdline.go b/internal/logger/cmdline.go
index 4957e05cad..82a6d2ee0b 100644
--- a/internal/logger/cmdline.go
+++ b/internal/logger/cmdline.go
@@ -7,25 +7,24 @@ SPDX-License-Identifier: AGPL-3.0-only
 package logger
 
 import (
-	"go.uber.org/zap"
-	"go.uber.org/zap/zapcore"
+	"log/slog"
 )
 
 // CmdLineVerbosityDescription explains numeric log levels.
 const CmdLineVerbosityDescription = "log verbosity in zap logging levels. Use -1 for debug information, 0 for info, 1 for warn, 2 for error"
 
 // VerbosityFromInt converts a verbosity level from an integer to a zapcore.Level.
-func VerbosityFromInt(verbosity int) zapcore.Level {
+func VerbosityFromInt(verbosity int) slog.Level {
 	switch {
 	case verbosity <= -1:
-		return zap.DebugLevel
+		return slog.LevelDebug
 	case verbosity == 0:
-		return zap.InfoLevel
+		return slog.LevelInfo
 	case verbosity == 1:
-		return zap.WarnLevel
+		return slog.LevelWarn
 	case verbosity >= 2:
-		return zap.ErrorLevel
+		return slog.LevelError
 	default:
-		return zap.InfoLevel
+		return slog.LevelInfo
 	}
 }
diff --git a/internal/logger/log.go b/internal/logger/log.go
index 55a9cb9c1e..13cdf51565 100644
--- a/internal/logger/log.go
+++ b/internal/logger/log.go
@@ -41,8 +41,11 @@ package logger
 import (
 	"context"
 	"fmt"
+	"log/slog"
 	"os"
 	"testing"
+	"runtime"
+	"time"
 
 	"github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/logging"
 	"go.uber.org/zap"
@@ -157,30 +160,30 @@ func (l *Logger) ReplaceGRPCLogger() {
 }
 
 // GetServerUnaryInterceptor returns a gRPC server option for intercepting unary gRPC logs.
-func (l *Logger) GetServerUnaryInterceptor() grpc.ServerOption {
+func GetServerUnaryInterceptor(l *slog.Logger) grpc.ServerOption {
 	return grpc.UnaryInterceptor(
-		logging.UnaryServerInterceptor(l.middlewareLogger()),
+		logging.UnaryServerInterceptor(middlewareLogger(l)),
 	)
 }
 
 // GetServerStreamInterceptor returns a gRPC server option for intercepting streaming gRPC logs.
-func (l *Logger) GetServerStreamInterceptor() grpc.ServerOption {
+func GetServerStreamInterceptor(l *slog.Logger) grpc.ServerOption {
 	return grpc.StreamInterceptor(
-		logging.StreamServerInterceptor(l.middlewareLogger()),
+		logging.StreamServerInterceptor(middlewareLogger(l)),
 	)
 }
 
 // GetClientUnaryInterceptor returns a gRPC client option for intercepting unary gRPC logs.
-func (l *Logger) GetClientUnaryInterceptor() grpc.DialOption {
+func GetClientUnaryInterceptor(l *slog.Logger) grpc.DialOption {
 	return grpc.WithUnaryInterceptor(
-		logging.UnaryClientInterceptor(l.middlewareLogger()),
+		logging.UnaryClientInterceptor(middlewareLogger(l)),
 	)
 }
 
 // GetClientStreamInterceptor returns a gRPC client option for intercepting stream gRPC logs.
-func (l *Logger) GetClientStreamInterceptor() grpc.DialOption {
+func GetClientStreamInterceptor(l *slog.Logger) grpc.DialOption {
 	return grpc.WithStreamInterceptor(
-		logging.StreamClientInterceptor(l.middlewareLogger()),
+		logging.StreamClientInterceptor(middlewareLogger(l)),
 	)
 }
 
@@ -189,7 +192,7 @@ func (l *Logger) getZapLogger() *zap.Logger {
 	return l.logger.Desugar()
 }
 
-func (l *Logger) middlewareLogger() logging.Logger {
+func middlewareLogger(l *slog.Logger) logging.Logger {
 	return logging.LoggerFunc(func(ctx context.Context, lvl logging.Level, msg string, fields ...any) {
 		f := make([]zap.Field, 0, len(fields)/2)
 
@@ -209,19 +212,23 @@ func (l *Logger) middlewareLogger() logging.Logger {
 			}
 		}
 
-		logger := l.getZapLogger().WithOptions(zap.AddCallerSkip(1)).With(f...)
+		var pcs [1]uintptr
+		runtime.Callers(2, pcs[:]) // skip [Callers, LoggerFunc]
+		r := slog.Record{}
 		switch lvl {
 		case logging.LevelDebug:
-			logger.Debug(msg)
+			r = slog.NewRecord(time.Now(), slog.LevelDebug, msg, pcs[0])
 		case logging.LevelInfo:
-			logger.Info(msg)
+			r = slog.NewRecord(time.Now(), slog.LevelInfo, msg, pcs[0])
 		case logging.LevelWarn:
-			logger.Warn(msg)
+			r = slog.NewRecord(time.Now(), slog.LevelWarn, msg, pcs[0])
 		case logging.LevelError:
-			logger.Error(msg)
+			r = slog.NewRecord(time.Now(), slog.LevelError, msg, pcs[0])
 		default:
 			panic(fmt.Sprintf("unknown level %v", lvl))
 		}
+		r.Add(fields...) // fields are alternating key/value pairs, not printf arguments
+		_ = l.Handler().Handle(ctx, r)
 	})
 }

From 1d0c6aa7fcb678f85bc2c65c58c48070a9f23f53 Mon Sep 17 00:00:00 2001
From: miampf
Date: Fri, 29 Dec 2023 15:15:43 +0100
Subject: [PATCH 02/54] added a test writer

removed unnecessary writer
---
 internal/logger/log.go | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)

diff --git a/internal/logger/log.go b/internal/logger/log.go
index 13cdf51565..2c2b333665 100644
--- a/internal/logger/log.go
+++ b/internal/logger/log.go
@@ -41,11 +41,12 @@ package logger
 import (
 	"context"
 	"fmt"
+	"io"
 	"log/slog"
 	"os"
+	"runtime"
 	"testing"
-	"runtime"
-	"time"
+	"time"
 
 	"github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/logging"
 	"go.uber.org/zap"
@@ -232,3 +233,14 @@ func middlewareLogger(l *slog.Logger) logging.Logger {
 		_ = l.Handler().Handle(ctx, r)
 	})
 }
+
+// TestWriter is an io.Writer that forwards all written log output to a test's log.
+type TestWriter struct {
+	T *testing.T
+}
+
+// Write logs the written bytes as a single test log line.
+func (t TestWriter) Write(p []byte) (int, error) {
+	t.T.Log(string(p))
+	return len(p), nil
+}
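TestWriter is what the later patches in this series plug into slog handlers so that service log output lands in the test log instead of stdout. A minimal usage sketch (the test name is hypothetical; the handler construction mirrors the *_test.go changes in PATCH 04 below):

    func TestSomething(t *testing.T) {
        log := slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil))
        log.Info("shown with -v or when the test fails")
    }

Because the writes are funneled through t.Log, output is buffered per test and only printed for failing tests or under "go test -v", which is the behavior the zap-based logger.NewTest previously provided.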
From 48d5a157dda83e5ca358febbb58455ba5e5f4502 Mon Sep 17 00:00:00 2001
From: miampf
Date: Wed, 3 Jan 2024 12:24:54 +0100
Subject: [PATCH 03/54] rewrote doc/description strings in logger

fixed some stuff I didn't see
replaced forgotten zap reference
removed unneeded dependency
---
 disk-mapper/cmd/main.go                               | 1 -
 disk-mapper/internal/recoveryserver/recoveryserver.go | 3 +--
 internal/logger/cmdline.go                            | 4 ++--
 3 files changed, 3 insertions(+), 5 deletions(-)

diff --git a/disk-mapper/cmd/main.go b/disk-mapper/cmd/main.go
index 415b1c56c6..534ae92670 100644
--- a/disk-mapper/cmd/main.go
+++ b/disk-mapper/cmd/main.go
@@ -35,7 +35,6 @@ import (
 	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/internal/role"
 	"github.com/spf13/afero"
-	"go.uber.org/zap"
 )
 
 const (
diff --git a/disk-mapper/internal/recoveryserver/recoveryserver.go b/disk-mapper/internal/recoveryserver/recoveryserver.go
index d4db3b7870..f2267ebbb9 100644
--- a/disk-mapper/internal/recoveryserver/recoveryserver.go
+++ b/disk-mapper/internal/recoveryserver/recoveryserver.go
@@ -27,7 +27,6 @@ import (
 	"github.com/edgelesssys/constellation/v2/internal/grpc/grpclog"
 	"github.com/edgelesssys/constellation/v2/internal/kms/kms"
 	"github.com/edgelesssys/constellation/v2/internal/logger"
-	"go.uber.org/zap"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
@@ -105,7 +104,7 @@ func (s *RecoveryServer) Serve(ctx context.Context, listener net.Listener, diskU
 func (s *RecoveryServer) Recover(ctx context.Context, req *recoverproto.RecoverMessage) (*recoverproto.RecoverResponse, error) {
 	s.mux.Lock()
 	defer s.mux.Unlock()
-	log := s.log.With(zap.String("peer", grpclog.PeerAddrFromContext(ctx)))
+	log := s.log.With(slog.String("peer", grpclog.PeerAddrFromContext(ctx)))
 
 	log.Infof("Received recover call")
 
diff --git a/internal/logger/cmdline.go b/internal/logger/cmdline.go
index 82a6d2ee0b..85dfb41ab4 100644
--- a/internal/logger/cmdline.go
+++ b/internal/logger/cmdline.go
@@ -11,9 +11,9 @@ import (
 )
 
 // CmdLineVerbosityDescription explains numeric log levels.
-const CmdLineVerbosityDescription = "log verbosity in zap logging levels. Use -1 for debug information, 0 for info, 1 for warn, 2 for error"
+const CmdLineVerbosityDescription = "log verbosity in slog logging levels. Use -1 for debug information, 0 for info, 1 for warn, 2 for error"
 
-// VerbosityFromInt converts a verbosity level from an integer to a zapcore.Level.
+// VerbosityFromInt converts a verbosity level from an integer to a slog.Level.
 func VerbosityFromInt(verbosity int) slog.Level {
 	switch {
 	case verbosity <= -1:

From f16ccf5679a4e052a956c597f7a8c63b69de2fb4 Mon Sep 17 00:00:00 2001
From: miampf
Date: Fri, 29 Dec 2023 15:18:59 +0100
Subject: [PATCH 04/54] rewrote packages keyservice joinservice upgrade-agent
 measurement-reader debugd disk-mapper

rewrote joinservice main
rewrote some unit tests
rewrote upgrade-agent + some grpc functions
rewrote measurement-reader
rewrote debugd
removed unused import
removed forgotten zap reference in measurements reader
rewrote disk-mapper + tests
rewrote packages verify disk-mapper malicious join bootstrapper attestationconfigapi versionapi internal/cloud/azure disk-mapper tests image/upload/internal/cmd
rewrote verify (WIP with loglevel increase)
rewrote forgotten zap references in disk-mapper
rewrote malicious join
rewrote bootstrapper
rewrote parts of internal/
rewrote attestationconfigapi (WIP)
rewrote versionapi cli
rewrote internal/cloud/azure
rewrote disk-mapper tests (untested by me rn)
rewrote image/upload/internal/cmd
removed forgotten zap references in verify/cmd
rewrote packages hack/oci-pin hack/qemu-metadata-api debugd/internal/debugd/deploy hack/bazel-deps-mirror cli/internal/cmd cli-k8s-compatibility
rewrote hack/qemu-metadata-api/server
rewrote debugd/internal/debugd/deploy
rewrote hack/bazel-deps-mirror
rewrote rest of hack/qemu-metadata-api
rewrote forgotten zap references in joinservice server
rewrote cli/internal/cmd
rewrote cli-k8s-compatibility
rewrote packages internal/staticupload e2d/internal/upgrade internal/constellation/helm internal/attestation/aws/snp internal/attestation/azure/trustedlaunch joinservice/internal/certcache/amkds
some missed unit tests
rewrote e2e/internal/upgrade
rewrote internal/constellation/helm internal/attestation/aws/snp internal/attestation/azure/trustedlaunch joinservice/internal/certcache/amkds
search and replace test logging over all left *_test.go
---
 bootstrapper/cmd/bootstrapper/main.go         | 108 +--
 bootstrapper/cmd/bootstrapper/run.go          |  94 +--
 .../internal/initserver/initserver.go         | 
26 +- .../internal/initserver/initserver_test.go | 7 +- .../internal/joinclient/joinclient.go | 42 +- .../internal/joinclient/joinclient_test.go | 5 +- .../internal/kubernetes/k8sapi/k8sutil.go | 27 +- bootstrapper/internal/kubernetes/k8sutil.go | 6 +- .../internal/kubernetes/kubernetes.go | 43 +- .../internal/kubernetes/kubernetes_test.go | 9 +- cli/internal/cmd/apply.go | 47 +- cli/internal/cmd/apply_test.go | 5 +- cli/internal/cmd/applyhelm.go | 14 +- cli/internal/cmd/applyinit.go | 12 +- cli/internal/cmd/applyterraform.go | 16 +- cli/internal/cmd/configfetchmeasurements.go | 13 +- .../cmd/configfetchmeasurements_test.go | 3 +- cli/internal/cmd/configgenerate_test.go | 7 +- cli/internal/cmd/create_test.go | 5 +- cli/internal/cmd/iamcreate.go | 7 +- cli/internal/cmd/iamcreate_test.go | 7 +- cli/internal/cmd/iamdestroy.go | 21 +- cli/internal/cmd/iamdestroy_test.go | 5 +- cli/internal/cmd/iamupgradeapply.go | 2 +- cli/internal/cmd/iamupgradeapply_test.go | 3 +- cli/internal/cmd/init.go | 6 +- cli/internal/cmd/init_test.go | 11 +- cli/internal/cmd/log.go | 14 +- cli/internal/cmd/maapatch.go | 3 +- cli/internal/cmd/maapatch_test.go | 3 +- cli/internal/cmd/miniup.go | 3 +- cli/internal/cmd/miniup_linux_amd64.go | 12 +- cli/internal/cmd/recover.go | 27 +- cli/internal/cmd/recover_test.go | 5 +- cli/internal/cmd/status.go | 1 - cli/internal/cmd/terminate_test.go | 3 +- cli/internal/cmd/upgradeapply_test.go | 3 +- cli/internal/cmd/upgradecheck.go | 43 +- cli/internal/cmd/upgradecheck_test.go | 5 +- cli/internal/cmd/verify.go | 17 +- cli/internal/cmd/verify_test.go | 7 +- debugd/cmd/debugd/debugd.go | 36 +- debugd/internal/debugd/deploy/download.go | 31 +- .../internal/debugd/deploy/download_test.go | 5 +- debugd/internal/debugd/deploy/service.go | 29 +- debugd/internal/debugd/deploy/service_test.go | 7 +- .../debugd/logcollector/logcollector.go | 66 +- debugd/internal/debugd/metadata/scheduler.go | 23 +- .../debugd/metadata/scheduler_test.go | 2 +- debugd/internal/debugd/server/server.go | 48 +- debugd/internal/debugd/server/server_test.go | 10 +- debugd/internal/filetransfer/filetransfer.go | 12 +- .../filetransfer/filetransfer_test.go | 8 +- disk-mapper/cmd/main.go | 57 +- .../internal/diskencryption/diskencryption.go | 11 +- .../internal/recoveryserver/recoveryserver.go | 21 +- .../recoveryserver/recoveryserver_test.go | 5 +- .../internal/rejoinclient/rejoinclient.go | 23 +- .../rejoinclient/rejoinclient_test.go | 7 +- disk-mapper/internal/setup/setup.go | 17 +- disk-mapper/internal/setup/setup_test.go | 7 +- disk-mapper/internal/test/benchmark_test.go | 5 +- disk-mapper/internal/test/integration_test.go | 4 +- e2e/internal/upgrade/helm.go | 2 +- e2e/malicious-join/malicious-join.go | 47 +- hack/bazel-deps-mirror/check.go | 34 +- hack/bazel-deps-mirror/fix.go | 52 +- .../internal/mirror/mirror_test.go | 6 +- hack/bazel-deps-mirror/upgrade.go | 48 +- hack/cli-k8s-compatibility/main.go | 64 +- hack/oci-pin/codegen.go | 19 +- hack/oci-pin/merge.go | 17 +- hack/oci-pin/sum.go | 19 +- hack/qemu-metadata-api/main.go | 53 +- hack/qemu-metadata-api/server/server.go | 52 +- hack/qemu-metadata-api/server/server_test.go | 9 +- image/upload/internal/cmd/aws.go | 106 +++ image/upload/internal/cmd/azure.go | 107 +++ image/upload/internal/cmd/flags.go | 21 +- image/upload/internal/cmd/gcp.go | 107 +++ image/upload/internal/cmd/info.go | 9 +- .../internal/cmd/measurementsenvelope.go | 8 +- .../upload/internal/cmd/measurementsmerge.go | 8 +- .../upload/internal/cmd/measurementsupload.go | 9 +- 
image/upload/internal/cmd/nop.go | 90 +++ .../api/attestationconfigapi/cli/delete.go | 8 +- .../api/attestationconfigapi/cli/upload.go | 15 +- internal/api/attestationconfigapi/client.go | 4 +- internal/api/attestationconfigapi/reporter.go | 10 +- internal/api/client/client.go | 29 +- internal/api/versionsapi/cli/add.go | 54 +- internal/api/versionsapi/cli/latest.go | 20 +- internal/api/versionsapi/cli/list.go | 30 +- internal/api/versionsapi/cli/rm.go | 122 +-- internal/api/versionsapi/client.go | 6 +- internal/attestation/attestation.go | 4 +- .../attestation/aws/snp/validator_test.go | 3 +- .../attestation/azure/snp/validator_test.go | 6 +- .../azure/trustedlaunch/trustedlaunch_test.go | 3 +- internal/attestation/snp/snp_test.go | 2 +- internal/attestation/vtpm/attestation_test.go | 8 +- internal/cloud/azure/azure.go | 11 +- internal/constellation/apply.go | 2 +- internal/constellation/apply_test.go | 6 +- internal/constellation/applyinit_test.go | 4 +- internal/constellation/helm/action.go | 2 +- internal/constellation/helm/actionfactory.go | 10 +- .../constellation/helm/actionfactory_test.go | 3 +- internal/constellation/helm/helm.go | 8 +- internal/constellation/helm/helm_test.go | 3 +- internal/constellation/helm/retryaction.go | 6 +- .../constellation/helm/retryaction_test.go | 3 +- internal/constellation/kubecmd/kubecmd.go | 26 +- .../constellation/kubecmd/kubecmd_test.go | 10 +- internal/logger/grpclogger.go | 9 +- internal/logger/log.go | 8 +- internal/osimage/archive/archive.go | 8 +- internal/osimage/aws/awsupload.go | 603 +++++++++++++++ internal/osimage/azure/azureupload.go | 710 ++++++++++++++++++ internal/osimage/gcp/gcpupload.go | 298 ++++++++ internal/osimage/imageinfo/imageinfo.go | 8 +- .../measurementsuploader.go | 8 +- internal/osimage/nop/nop.go | 7 +- internal/staticupload/staticupload.go | 14 +- internal/staticupload/staticupload_test.go | 11 +- internal/verify/verify.go | 6 +- internal/verify/verify_test.go | 2 +- joinservice/cmd/main.go | 61 +- .../internal/certcache/amdkds/amdkds_test.go | 12 +- joinservice/internal/certcache/certcache.go | 28 +- .../internal/certcache/certcache_test.go | 5 +- joinservice/internal/kms/kms.go | 11 +- joinservice/internal/kms/kms_test.go | 3 +- joinservice/internal/kubeadm/kubeadm.go | 15 +- joinservice/internal/kubeadm/kubeadm_test.go | 5 +- .../internal/kubernetesca/kubernetesca.go | 12 +- .../kubernetesca/kubernetesca_test.go | 3 +- joinservice/internal/server/server.go | 62 +- joinservice/internal/server/server_test.go | 5 +- joinservice/internal/watcher/validator.go | 10 +- .../internal/watcher/validator_test.go | 9 +- joinservice/internal/watcher/watcher.go | 13 +- joinservice/internal/watcher/watcher_test.go | 3 +- keyservice/cmd/main.go | 29 +- keyservice/internal/server/server.go | 19 +- keyservice/internal/server/server_test.go | 3 +- measurement-reader/cmd/main.go | 18 +- s3proxy/cmd/main.go | 13 +- s3proxy/internal/kms/kms.go | 11 +- s3proxy/internal/kms/kms_test.go | 3 +- s3proxy/internal/router/handler.go | 40 +- s3proxy/internal/router/object.go | 23 +- s3proxy/internal/router/router.go | 3 +- upgrade-agent/cmd/main.go | 16 +- upgrade-agent/internal/server/server.go | 19 +- verify/cmd/main.go | 92 +-- verify/server/server.go | 33 +- verify/server/server_test.go | 7 +- 158 files changed, 3418 insertions(+), 1296 deletions(-) create mode 100644 image/upload/internal/cmd/aws.go create mode 100644 image/upload/internal/cmd/azure.go create mode 100644 image/upload/internal/cmd/gcp.go create mode 100644 
image/upload/internal/cmd/nop.go
 create mode 100644 internal/osimage/aws/awsupload.go
 create mode 100644 internal/osimage/azure/azureupload.go
 create mode 100644 internal/osimage/gcp/gcpupload.go

diff --git a/bootstrapper/cmd/bootstrapper/main.go b/bootstrapper/cmd/bootstrapper/main.go
index efb010ea14..75c1c142f7 100644
--- a/bootstrapper/cmd/bootstrapper/main.go
+++ b/bootstrapper/cmd/bootstrapper/main.go
@@ -7,14 +7,14 @@ SPDX-License-Identifier: AGPL-3.0-only
 package main
 
 import (
-	"context"
-	"flag"
-	"io"
-	"os"
-	"strconv"
+	"context"
+	"flag"
+	"io"
+	"log/slog"
+	"os"
+	"strconv"
 
-	"github.com/spf13/afero"
-	"go.uber.org/zap"
+	"github.com/spf13/afero"
 
 	"github.com/edgelesssys/constellation/v2/bootstrapper/internal/kubernetes"
 	"github.com/edgelesssys/constellation/v2/bootstrapper/internal/kubernetes/k8sapi"
@@ -37,25 +37,25 @@ import (
 )
 
 const (
-	// constellationCSP is the environment variable stating which Cloud Service Provider Constellation is running on.
-	constellationCSP = "CONSTEL_CSP"
+	// constellationCSP is the environment variable stating which Cloud Service Provider Constellation is running on.
+	constellationCSP = "CONSTEL_CSP"
 )
 
 func main() {
-	gRPCDebug := flag.Bool("debug", false, "Enable gRPC debug logging")
-	verbosity := flag.Int("v", 0, logger.CmdLineVerbosityDescription)
-	flag.Parse()
-	log := logger.New(logger.JSONLog, logger.VerbosityFromInt(*verbosity)).Named("bootstrapper")
-	defer log.Sync()
-
-	if *gRPCDebug {
-		log.Named("gRPC").ReplaceGRPCLogger()
-	} else {
-		log.Named("gRPC").WithIncreasedLevel(zap.WarnLevel).ReplaceGRPCLogger()
-	}
+	gRPCDebug := flag.Bool("debug", false, "Enable gRPC debug logging")
+	verbosity := flag.Int("v", 0, logger.CmdLineVerbosityDescription)
+	flag.Parse()
+	log := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: logger.VerbosityFromInt(*verbosity)})).WithGroup("bootstrapper")
+
+	if *gRPCDebug {
+		logger.ReplaceGRPCLogger(log.WithGroup("gRPC"))
+	} else {
+		// TODO(miampf): Find a good way to dynamically increase slog logLevel
+		logger.ReplaceGRPCLogger(log.WithGroup("gRPC").WithIncreasedLevel(slog.LevelWarn))
+	}
 
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 
 	bindIP := "0.0.0.0"
 	bindPort := strconv.Itoa(constants.BootstrapperPort)
@@ -64,22 +64,25 @@ func main() {
 	var openDevice vtpm.TPMOpenFunc
 	var fs afero.Fs
 
-	attestVariant, err := variant.FromString(os.Getenv(constants.AttestationVariant))
-	if err != nil {
-		log.With(zap.Error(err)).Fatalf("Failed to parse attestation variant")
-	}
-	issuer, err := choose.Issuer(attestVariant, log)
-	if err != nil {
-		log.With(zap.Error(err)).Fatalf("Failed to select issuer")
-	}
-
-	switch cloudprovider.FromString(os.Getenv(constellationCSP)) {
-	case cloudprovider.AWS:
-		metadata, err := awscloud.New(ctx)
-		if err != nil {
-			log.With(zap.Error(err)).Fatalf("Failed to set up AWS metadata API")
-		}
-		metadataAPI = metadata
+	attestVariant, err := variant.FromString(os.Getenv(constants.AttestationVariant))
+	if err != nil {
+		log.With(slog.Any("error", err)).Error("Failed to parse attestation variant")
+		os.Exit(1)
+	}
+	issuer, err := choose.Issuer(attestVariant, log)
+	if err != nil {
+		log.With(slog.Any("error", err)).Error("Failed to select issuer")
+		os.Exit(1)
+	}
+
+	switch cloudprovider.FromString(os.Getenv(constellationCSP)) {
+	case cloudprovider.AWS:
+		metadata, err := awscloud.New(ctx)
+		if err != nil {
+			log.With(slog.Any("error", err)).Error("Failed to set up AWS metadata API")
+			os.Exit(1)
+		}
+		metadataAPI = metadata
 		clusterInitJoiner = kubernetes.New(
 			"aws", k8sapi.NewKubernetesUtil(), &k8sapi.KubdeadmConfiguration{}, kubectl.NewUninitialized(),
@@ -88,12 +91,13 @@ func main() {
 		openDevice = vtpm.OpenVTPM
 		fs = afero.NewOsFs()
 
-	case cloudprovider.GCP:
-		metadata, err := gcpcloud.New(ctx)
-		if err != nil {
-			log.With(zap.Error(err)).Fatalf("Failed to create GCP metadata client")
-		}
-		defer metadata.Close()
+	case cloudprovider.GCP:
+		metadata, err := gcpcloud.New(ctx)
+		if err != nil {
+			log.With(slog.Any("error", err)).Error("Failed to create GCP metadata client")
+			os.Exit(1)
+		}
+		defer metadata.Close()
 
 		metadataAPI = metadata
 		clusterInitJoiner = kubernetes.New(
@@ -113,14 +117,14 @@ func main() {
 			log.With(zap.Error(err)).Fatalf("Failed to prepare Azure control plane node")
 		}
 
-		metadataAPI = metadata
-		clusterInitJoiner = kubernetes.New(
-			"azure", k8sapi.NewKubernetesUtil(), &k8sapi.KubdeadmConfiguration{}, kubectl.NewUninitialized(),
-			metadata, &kubewaiter.CloudKubeAPIWaiter{},
-		)
+		metadataAPI = metadata
+		clusterInitJoiner = kubernetes.New(
+			"azure", k8sapi.NewKubernetesUtil(), &k8sapi.KubdeadmConfiguration{}, kubectl.NewUninitialized(),
+			metadata, &kubewaiter.CloudKubeAPIWaiter{},
+		)
 
-		openDevice = vtpm.OpenVTPM
-		fs = afero.NewOsFs()
+		openDevice = vtpm.OpenVTPM
+		fs = afero.NewOsFs()
 
 	case cloudprovider.QEMU:
 		metadata := qemucloud.New()
@@ -162,7 +166,7 @@ func main() {
 		fs = afero.NewMemMapFs()
 	}
 
-	fileHandler := file.NewHandler(fs)
+	fileHandler := file.NewHandler(fs)
 
 	run(issuer, openDevice, fileHandler, clusterInitJoiner, metadataAPI, bindIP, bindPort, log)
 }
log.With(slog.Any("error", err)).Error("Failed to restart kubelet") + os.Exit(1) + } + return + } + + nodeLock := nodelock.New(openDevice) + initServer, err := initserver.New(context.Background(), nodeLock, kube, issuer, fileHandler, metadata, log) + if err != nil { + log.With(slog.Any("error", err)).Error("Failed to create init server") + os.Exit(1) + } + + dialer := dialer.New(issuer, nil, &net.Dialer{}) + joinClient := joinclient.New(nodeLock, dialer, kube, metadata, log) + + cleaner := clean.New().With(initServer).With(joinClient) + go cleaner.Start() + defer cleaner.Done() + + joinClient.Start(cleaner) + + if err := initServer.Serve(bindIP, bindPort, cleaner); err != nil { + log.With(slog.Any("error", err)).Error("Failed to serve init server") + os.Exit(1) + } log.Infof("bootstrapper done") } func getDiskUUID() (string, error) { - disk := diskencryption.New() - free, err := disk.Open() - if err != nil { - return "", err - } - defer free() - return disk.UUID() + disk := diskencryption.New() + free, err := disk.Open() + if err != nil { + return "", err + } + defer free() + return disk.UUID() } type clusterInitJoiner interface { - joinclient.ClusterJoiner - initserver.ClusterInitializer - StartKubelet() error + joinclient.ClusterJoiner + initserver.ClusterInitializer + StartKubelet() error } type metadataAPI interface { - joinclient.MetadataAPI - initserver.MetadataAPI + joinclient.MetadataAPI + initserver.MetadataAPI } diff --git a/bootstrapper/internal/initserver/initserver.go b/bootstrapper/internal/initserver/initserver.go index 1c9d6c40a0..ff2e5e975f 100644 --- a/bootstrapper/internal/initserver/initserver.go +++ b/bootstrapper/internal/initserver/initserver.go @@ -23,6 +23,7 @@ import ( "errors" "fmt" "io" + "log/slog" "net" "strings" "sync" @@ -43,7 +44,6 @@ import ( "github.com/edgelesssys/constellation/v2/internal/nodestate" "github.com/edgelesssys/constellation/v2/internal/role" "github.com/edgelesssys/constellation/v2/internal/versions/components" - "go.uber.org/zap" "golang.org/x/crypto/bcrypt" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -68,7 +68,7 @@ type Server struct { kmsURI string - log *logger.Logger + log *slog.Logger journaldCollector journaldCollection @@ -76,8 +76,8 @@ type Server struct { } // New creates a new initialization server. -func New(ctx context.Context, lock locker, kube ClusterInitializer, issuer atls.Issuer, fh file.Handler, metadata MetadataAPI, log *logger.Logger) (*Server, error) { - log = log.Named("initServer") +func New(ctx context.Context, lock locker, kube ClusterInitializer, issuer atls.Issuer, fh file.Handler, metadata MetadataAPI, log *slog.Logger) (*Server, error) { + log = log.WithGroup("initServer") initSecretHash, err := metadata.InitSecretHash(ctx) if err != nil { @@ -106,7 +106,7 @@ func New(ctx context.Context, lock locker, kube ClusterInitializer, issuer atls. 
grpcServer := grpc.NewServer( grpc.Creds(atlscredentials.New(issuer, nil)), grpc.KeepaliveParams(keepalive.ServerParameters{Time: 15 * time.Second}), - log.Named("gRPC").GetServerUnaryInterceptor(), + logger.GetServerUnaryInterceptor(log.WithGroup("gRPC")), ) initproto.RegisterAPIServer(grpcServer, server) @@ -122,7 +122,7 @@ func (s *Server) Serve(ip, port string, cleaner cleaner) error { return fmt.Errorf("failed to listen: %w", err) } - s.log.Infof("Starting") + s.log.Info("Starting") return s.grpcServer.Serve(lis) } @@ -132,8 +132,8 @@ func (s *Server) Init(req *initproto.InitRequest, stream initproto.API_InitServe s.shutdownLock.RLock() defer s.shutdownLock.RUnlock() - log := s.log.With(zap.String("peer", grpclog.PeerAddrFromContext(stream.Context()))) - log.Infof("Init called") + log := s.log.With(slog.String("peer", grpclog.PeerAddrFromContext(stream.Context()))) + log.Info("Init called") s.kmsURI = req.KmsUri @@ -174,7 +174,7 @@ func (s *Server) Init(req *initproto.InitRequest, stream initproto.API_InitServe // init does not make sense, so we just stop. // // The server stops itself after the current call is done. - log.Warnf("Node is already in a join process") + log.Warn("Node is already in a join process") err = status.Error(codes.FailedPrecondition, "node is already being activated") @@ -228,7 +228,7 @@ func (s *Server) Init(req *initproto.InitRequest, stream initproto.API_InitServe return err } - log.Infof("Init succeeded") + log.Info("Init succeeded") successMessage := &initproto.InitResponse_InitSuccess{ InitSuccess: &initproto.InitSuccessResponse{ @@ -287,14 +287,14 @@ func (s *Server) sendLogsWithMessage(stream initproto.API_InitServer, message er // Stop stops the initialization server gracefully. func (s *Server) Stop() { - s.log.Infof("Stopping") + s.log.Info("Stopping") // Make sure to only stop the server if no Init calls are running s.shutdownLock.Lock() defer s.shutdownLock.Unlock() s.grpcServer.GracefulStop() - s.log.Infof("Stopped") + s.log.Info("Stopped") } func (s *Server) setupDisk(ctx context.Context, cloudKms kms.CloudKMS) error { @@ -342,7 +342,7 @@ type ClusterInitializer interface { kubernetesComponents components.Components, apiServerCertSANs []string, serviceCIDR string, - log *logger.Logger, + log *slog.Logger, ) ([]byte, error) } diff --git a/bootstrapper/internal/initserver/initserver_test.go b/bootstrapper/internal/initserver/initserver_test.go index 155fa17938..3c79fde408 100644 --- a/bootstrapper/internal/initserver/initserver_test.go +++ b/bootstrapper/internal/initserver/initserver_test.go @@ -16,6 +16,7 @@ import ( "sync" "testing" "time" + "log/slog" "github.com/edgelesssys/constellation/v2/bootstrapper/initproto" "github.com/edgelesssys/constellation/v2/internal/atls" @@ -66,7 +67,7 @@ func TestNew(t *testing.T) { t.Run(name, func(t *testing.T) { assert := assert.New(t) - server, err := New(context.TODO(), newFakeLock(), &stubClusterInitializer{}, atls.NewFakeIssuer(variant.Dummy{}), fh, &tc.metadata, logger.NewTest(t)) + server, err := New(context.TODO(), newFakeLock(), &stubClusterInitializer{}, atls.NewFakeIssuer(variant.Dummy{}), fh, &tc.metadata, slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil))) if tc.wantErr { assert.Error(err) return @@ -214,7 +215,7 @@ func TestInit(t *testing.T) { initializer: tc.initializer, disk: tc.disk, fileHandler: tc.fileHandler, - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), grpcServer: serveStopper, cleaner: &fakeCleaner{serveStopper: serveStopper}, 
initSecretHash: tc.initSecretHash, @@ -408,7 +409,7 @@ type stubClusterInitializer struct { func (i *stubClusterInitializer) InitCluster( context.Context, string, string, - bool, components.Components, []string, string, *logger.Logger, + bool, components.Components, []string, string, *slog.Logger, ) ([]byte, error) { return i.initClusterKubeconfig, i.initClusterErr } diff --git a/bootstrapper/internal/joinclient/joinclient.go b/bootstrapper/internal/joinclient/joinclient.go index 7f8857419d..2e9c7feae3 100644 --- a/bootstrapper/internal/joinclient/joinclient.go +++ b/bootstrapper/internal/joinclient/joinclient.go @@ -21,6 +21,7 @@ import ( "context" "errors" "fmt" + "log/slog" "net" "path/filepath" "strconv" @@ -39,7 +40,6 @@ import ( "github.com/edgelesssys/constellation/v2/internal/versions/components" "github.com/edgelesssys/constellation/v2/joinservice/joinproto" "github.com/spf13/afero" - "go.uber.org/zap" "google.golang.org/grpc" kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3" kubeconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" @@ -73,7 +73,7 @@ type JoinClient struct { cleaner cleaner metadataAPI MetadataAPI - log *logger.Logger + log *slog.Logger mux sync.Mutex stopC chan struct{} @@ -81,7 +81,7 @@ type JoinClient struct { } // New creates a new JoinClient. -func New(lock locker, dial grpcDialer, joiner ClusterJoiner, meta MetadataAPI, log *logger.Logger) *JoinClient { +func New(lock locker, dial grpcDialer, joiner ClusterJoiner, meta MetadataAPI, log *slog.Logger) *JoinClient { return &JoinClient{ nodeLock: lock, disk: diskencryption.New(), @@ -93,7 +93,7 @@ func New(lock locker, dial grpcDialer, joiner ClusterJoiner, meta MetadataAPI, l dialer: dial, joiner: joiner, metadataAPI: meta, - log: log.Named("join-client"), + log: log.WithGroup("join-client"), } } @@ -110,7 +110,7 @@ func (c *JoinClient) Start(cleaner cleaner) { return } - c.log.Infof("Starting") + c.log.Info("Starting") c.stopC = make(chan struct{}, 1) c.stopDone = make(chan struct{}, 1) c.cleaner = cleaner @@ -119,11 +119,11 @@ func (c *JoinClient) Start(cleaner cleaner) { go func() { defer ticker.Stop() defer func() { c.stopDone <- struct{}{} }() - defer c.log.Infof("Client stopped") + defer c.log.Info("Client stopped") diskUUID, err := c.getDiskUUID() if err != nil { - c.log.With(zap.Error(err)).Errorf("Failed to get disk UUID") + c.log.With(slog.Any("error", err)).Error("Failed to get disk UUID") return } c.diskUUID = diskUUID @@ -131,12 +131,12 @@ func (c *JoinClient) Start(cleaner cleaner) { for { err := c.getNodeMetadata() if err == nil { - c.log.With(zap.String("role", c.role.String()), zap.String("name", c.nodeName)).Infof("Received own instance metadata") + c.log.With(slog.String("role", c.role.String()), slog.String("name", c.nodeName)).Info("Received own instance metadata") break } - c.log.With(zap.Error(err)).Errorf("Failed to retrieve instance metadata") + c.log.With(slog.Any("error", err)).Error("Failed to retrieve instance metadata") - c.log.With(zap.Duration("interval", c.interval)).Infof("Sleeping") + c.log.With(slog.Duration("interval", c.interval)).Info("Sleeping") select { case <-c.stopC: return @@ -147,15 +147,15 @@ func (c *JoinClient) Start(cleaner cleaner) { for { err := c.tryJoinWithAvailableServices() if err == nil { - c.log.Infof("Joined successfully. Client is shutting down") + c.log.Info("Joined successfully. 
Client is shutting down") return } else if isUnrecoverable(err) { - c.log.With(zap.Error(err)).Errorf("Unrecoverable error occurred") + c.log.With(slog.Any("error", err)).Error("Unrecoverable error occurred") return } - c.log.With(zap.Error(err)).Warnf("Join failed for all available endpoints") + c.log.With(slog.Any("error", err)).Warn("Join failed for all available endpoints") - c.log.With(zap.Duration("interval", c.interval)).Infof("Sleeping") + c.log.With(slog.Duration("interval", c.interval)).Info("Sleeping") select { case <-c.stopC: return @@ -231,7 +231,7 @@ func (c *JoinClient) join(serviceEndpoint string) error { conn, err := c.dialer.Dial(ctx, serviceEndpoint) if err != nil { - c.log.With(zap.String("endpoint", serviceEndpoint), zap.Error(err)).Errorf("Join service unreachable") + c.log.With(slog.String("endpoint", serviceEndpoint), slog.Any("error", err)).Error("Join service unreachable") return fmt.Errorf("dialing join service endpoint: %w", err) } defer conn.Close() @@ -244,7 +244,7 @@ func (c *JoinClient) join(serviceEndpoint string) error { } ticket, err := protoClient.IssueJoinTicket(ctx, req) if err != nil { - c.log.With(zap.String("endpoint", serviceEndpoint), zap.Error(err)).Errorf("Issuing join ticket failed") + c.log.With(slog.String("endpoint", serviceEndpoint), slog.Any("error", err)).Error("Issuing join ticket failed") return fmt.Errorf("issuing join ticket: %w", err) } @@ -269,7 +269,7 @@ func (c *JoinClient) startNodeAndJoin(ticket *joinproto.IssueJoinTicketResponse, nodeLockAcquired, err := c.nodeLock.TryLockOnce(clusterID) if err != nil { - c.log.With(zap.Error(err)).Errorf("Acquiring node lock failed") + c.log.With(slog.Any("error", err)).Error("Acquiring node lock failed") return fmt.Errorf("acquiring node lock: %w", err) } if !nodeLockAcquired { @@ -322,12 +322,12 @@ func (c *JoinClient) getNodeMetadata() error { ctx, cancel := c.timeoutCtx() defer cancel() - c.log.Debugf("Requesting node metadata from metadata API") + c.log.Debug("Requesting node metadata from metadata API") inst, err := c.metadataAPI.Self(ctx) if err != nil { return err } - c.log.With(zap.Any("instance", inst)).Debugf("Received node metadata") + c.log.With(slog.Any("instance", inst)).Debug("Received node metadata") if inst.Name == "" { return errors.New("got instance metadata with empty name") @@ -371,7 +371,7 @@ func (c *JoinClient) getDiskUUID() (string, error) { func (c *JoinClient) getControlPlaneIPs(ctx context.Context) ([]string, error) { instances, err := c.metadataAPI.List(ctx) if err != nil { - c.log.With(zap.Error(err)).Errorf("Failed to list instances from metadata API") + c.log.With(slog.Any("error", err)).Error("Failed to list instances from metadata API") return nil, fmt.Errorf("listing instances from metadata API: %w", err) } @@ -382,7 +382,7 @@ func (c *JoinClient) getControlPlaneIPs(ctx context.Context) ([]string, error) { } } - c.log.With(zap.Strings("IPs", ips)).Infof("Received control plane endpoints") + c.log.With(slog.Any("IPs", ips)).Info("Received control plane endpoints") return ips, nil } diff --git a/bootstrapper/internal/joinclient/joinclient_test.go b/bootstrapper/internal/joinclient/joinclient_test.go index 82426b0a5a..29516b14b6 100644 --- a/bootstrapper/internal/joinclient/joinclient_test.go +++ b/bootstrapper/internal/joinclient/joinclient_test.go @@ -9,6 +9,7 @@ package joinclient import ( "context" "errors" + "log/slog" "net" "strconv" "sync" @@ -220,7 +221,7 @@ func TestClient(t *testing.T) { fileHandler: fileHandler, metadataAPI: metadataAPI, clock: 
clock, - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), } serverCreds := atlscredentials.New(nil, nil) @@ -275,7 +276,7 @@ func TestClientConcurrentStartStop(t *testing.T) { fileHandler: file.NewHandler(afero.NewMemMapFs()), metadataAPI: &stubRepeaterMetadataAPI{}, clock: testclock.NewFakeClock(time.Now()), - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), } wg := sync.WaitGroup{} diff --git a/bootstrapper/internal/kubernetes/k8sapi/k8sutil.go b/bootstrapper/internal/kubernetes/k8sapi/k8sutil.go index 5f00f3e1e3..53f681b493 100644 --- a/bootstrapper/internal/kubernetes/k8sapi/k8sutil.go +++ b/bootstrapper/internal/kubernetes/k8sapi/k8sutil.go @@ -13,6 +13,7 @@ import ( "encoding/pem" "errors" "fmt" + "log/slog" "net" "os" "os/exec" @@ -30,9 +31,7 @@ import ( "github.com/edgelesssys/constellation/v2/internal/crypto" "github.com/edgelesssys/constellation/v2/internal/file" "github.com/edgelesssys/constellation/v2/internal/installer" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/spf13/afero" - "go.uber.org/zap" ) const ( @@ -87,7 +86,7 @@ func (k *KubernetesUtil) InstallComponents(ctx context.Context, kubernetesCompon // InitCluster instruments kubeadm to initialize the K8s cluster. // On success an admin kubeconfig file is returned. func (k *KubernetesUtil) InitCluster( - ctx context.Context, initConfig []byte, nodeName, clusterName string, ips []net.IP, conformanceMode bool, log *logger.Logger, + ctx context.Context, initConfig []byte, nodeName, clusterName string, ips []net.IP, conformanceMode bool, log *slog.Logger, ) ([]byte, error) { // TODO(3u13r): audit policy should be user input auditPolicy, err := resources.NewDefaultAuditPolicy().Marshal() @@ -108,7 +107,7 @@ func (k *KubernetesUtil) InitCluster( } // preflight - log.Infof("Running kubeadm preflight checks") + log.Info("Running kubeadm preflight checks") cmd := exec.CommandContext(ctx, constants.KubeadmPath, "init", "phase", "preflight", "-v=5", "--config", initConfigFile.Name()) out, err := cmd.CombinedOutput() if err != nil { @@ -120,7 +119,7 @@ func (k *KubernetesUtil) InitCluster( } // create CA certs - log.Infof("Creating Kubernetes control-plane certificates and keys") + log.Info("Creating Kubernetes control-plane certificates and keys") cmd = exec.CommandContext(ctx, constants.KubeadmPath, "init", "phase", "certs", "all", "-v=5", "--config", initConfigFile.Name()) out, err = cmd.CombinedOutput() if err != nil { @@ -132,19 +131,19 @@ func (k *KubernetesUtil) InitCluster( } // create kubelet key and CA signed certificate for the node - log.Infof("Creating signed kubelet certificate") + log.Info("Creating signed kubelet certificate") if err := k.createSignedKubeletCert(nodeName, ips); err != nil { return nil, fmt.Errorf("creating signed kubelete certificate: %w", err) } // Create static pods directory for all nodes (the Kubelets on the worker nodes also expect the path to exist) - log.Infof("Creating static Pod directory /etc/kubernetes/manifests") + log.Info("Creating static Pod directory /etc/kubernetes/manifests") if err := os.MkdirAll("/etc/kubernetes/manifests", os.ModePerm); err != nil { return nil, fmt.Errorf("creating static pods directory: %w", err) } // initialize the cluster - log.Infof("Initializing the cluster using kubeadm init") + log.Info("Initializing the cluster using kubeadm init") skipPhases := "--skip-phases=preflight,certs" if !conformanceMode { skipPhases += ",addon/kube-proxy" @@ -159,11 
+158,11 @@ func (k *KubernetesUtil) InitCluster( } return nil, fmt.Errorf("kubeadm init: %w", err) } - log.With(zap.String("output", string(out))).Infof("kubeadm init succeeded") + log.With(slog.String("output", string(out))).Info("kubeadm init succeeded") userName := clusterName + "-admin" - log.With(zap.String("userName", userName)).Infof("Creating admin kubeconfig file") + log.With(slog.String("userName", userName)).Info("Creating admin kubeconfig file") cmd = exec.CommandContext( ctx, constants.KubeadmPath, "kubeconfig", "user", "--client-name", userName, "--config", initConfigFile.Name(), "--org", user.SystemPrivilegedGroup, @@ -176,12 +175,12 @@ func (k *KubernetesUtil) InitCluster( } return nil, fmt.Errorf("kubeadm kubeconfig user: %w", err) } - log.Infof("kubeadm kubeconfig user succeeded") + log.Info("kubeadm kubeconfig user succeeded") return out, nil } // JoinCluster joins existing Kubernetes cluster using kubeadm join. -func (k *KubernetesUtil) JoinCluster(ctx context.Context, joinConfig []byte, log *logger.Logger) error { +func (k *KubernetesUtil) JoinCluster(ctx context.Context, joinConfig []byte, log *slog.Logger) error { // TODO(3u13r): audit policy should be user input auditPolicy, err := resources.NewDefaultAuditPolicy().Marshal() if err != nil { @@ -201,7 +200,7 @@ func (k *KubernetesUtil) JoinCluster(ctx context.Context, joinConfig []byte, log } // Create static pods directory for all nodes (the Kubelets on the worker nodes also expect the path to exist) - log.Infof("Creating static Pod directory /etc/kubernetes/manifests") + log.Info("Creating static Pod directory /etc/kubernetes/manifests") if err := os.MkdirAll("/etc/kubernetes/manifests", os.ModePerm); err != nil { return fmt.Errorf("creating static pods directory: %w", err) } @@ -216,7 +215,7 @@ func (k *KubernetesUtil) JoinCluster(ctx context.Context, joinConfig []byte, log } return fmt.Errorf("kubeadm join: %w", err) } - log.With(zap.String("output", string(out))).Infof("kubeadm join succeeded") + log.With(slog.String("output", string(out))).Info("kubeadm join succeeded") return nil } diff --git a/bootstrapper/internal/kubernetes/k8sutil.go b/bootstrapper/internal/kubernetes/k8sutil.go index 3c7b55718c..1faf6c3cf6 100644 --- a/bootstrapper/internal/kubernetes/k8sutil.go +++ b/bootstrapper/internal/kubernetes/k8sutil.go @@ -8,15 +8,15 @@ package kubernetes import ( "context" + "log/slog" "net" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/internal/versions/components" ) type clusterUtil interface { InstallComponents(ctx context.Context, kubernetesComponents components.Components) error - InitCluster(ctx context.Context, initConfig []byte, nodeName, clusterName string, ips []net.IP, conformanceMode bool, log *logger.Logger) ([]byte, error) - JoinCluster(ctx context.Context, joinConfig []byte, log *logger.Logger) error + InitCluster(ctx context.Context, initConfig []byte, nodeName, clusterName string, ips []net.IP, conformanceMode bool, log *slog.Logger) ([]byte, error) + JoinCluster(ctx context.Context, joinConfig []byte, log *slog.Logger) error StartKubelet() error } diff --git a/bootstrapper/internal/kubernetes/kubernetes.go b/bootstrapper/internal/kubernetes/kubernetes.go index ed587d933c..ec365db1b7 100644 --- a/bootstrapper/internal/kubernetes/kubernetes.go +++ b/bootstrapper/internal/kubernetes/kubernetes.go @@ -10,6 +10,7 @@ package kubernetes import ( "context" "fmt" + "log/slog" "net" "regexp" "strings" @@ -20,10 +21,8 @@ import ( 
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider" "github.com/edgelesssys/constellation/v2/internal/constants" "github.com/edgelesssys/constellation/v2/internal/kubernetes" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/internal/role" "github.com/edgelesssys/constellation/v2/internal/versions/components" - "go.uber.org/zap" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3" @@ -69,9 +68,9 @@ func New(cloudProvider string, clusterUtil clusterUtil, configProvider configura // InitCluster initializes a new Kubernetes cluster and applies pod network provider. func (k *KubeWrapper) InitCluster( - ctx context.Context, versionString, clusterName string, conformanceMode bool, kubernetesComponents components.Components, apiServerCertSANs []string, serviceCIDR string, log *logger.Logger, + ctx context.Context, versionString, clusterName string, conformanceMode bool, kubernetesComponents components.Components, apiServerCertSANs []string, serviceCIDR string, log *slog.Logger, ) ([]byte, error) { - log.With(zap.String("version", versionString)).Infof("Installing Kubernetes components") + log.With(slog.String("version", versionString)).Info("Installing Kubernetes components") if err := k.clusterUtil.InstallComponents(ctx, kubernetesComponents); err != nil { return nil, err } @@ -79,7 +78,7 @@ func (k *KubeWrapper) InitCluster( var validIPs []net.IP // Step 1: retrieve cloud metadata for Kubernetes configuration - log.Infof("Retrieving node metadata") + log.Info("Retrieving node metadata") instance, err := k.providerMetadata.Self(ctx) if err != nil { return nil, fmt.Errorf("retrieving own instance metadata: %w", err) @@ -108,14 +107,14 @@ func (k *KubeWrapper) InitCluster( certSANs = append(certSANs, apiServerCertSANs...) log.With( - zap.String("nodeName", nodeName), - zap.String("providerID", instance.ProviderID), - zap.String("nodeIP", nodeIP), - zap.String("controlPlaneHost", controlPlaneHost), - zap.String("controlPlanePort", controlPlanePort), - zap.String("certSANs", strings.Join(certSANs, ",")), - zap.String("podCIDR", subnetworkPodCIDR), - ).Infof("Setting information for node") + slog.String("nodeName", nodeName), + slog.String("providerID", instance.ProviderID), + slog.String("nodeIP", nodeIP), + slog.String("controlPlaneHost", controlPlaneHost), + slog.String("controlPlanePort", controlPlanePort), + slog.String("certSANs", strings.Join(certSANs, ",")), + slog.String("podCIDR", subnetworkPodCIDR), + ).Info("Setting information for node") // Step 2: configure kubeadm init config ccmSupported := cloudprovider.FromString(k.cloudProvider) == cloudprovider.Azure || @@ -133,7 +132,7 @@ func (k *KubeWrapper) InitCluster( if err != nil { return nil, fmt.Errorf("encoding kubeadm init configuration as YAML: %w", err) } - log.Infof("Initializing Kubernetes cluster") + log.Info("Initializing Kubernetes cluster") kubeConfig, err := k.clusterUtil.InitCluster(ctx, initConfigYAML, nodeName, clusterName, validIPs, conformanceMode, log) if err != nil { return nil, fmt.Errorf("kubeadm init: %w", err) @@ -186,7 +185,7 @@ func (k *KubeWrapper) InitCluster( } // JoinCluster joins existing Kubernetes cluster. 
-func (k *KubeWrapper) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTokenDiscovery, peerRole role.Role, k8sComponents components.Components, log *logger.Logger) error { +func (k *KubeWrapper) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTokenDiscovery, peerRole role.Role, k8sComponents components.Components, log *slog.Logger) error { log.With("k8sComponents", k8sComponents).Infof("Installing provided kubernetes components") if err := k.clusterUtil.InstallComponents(ctx, k8sComponents); err != nil { return fmt.Errorf("installing kubernetes components: %w", err) @@ -214,12 +213,12 @@ func (k *KubeWrapper) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTo args.APIServerEndpoint = net.JoinHostPort(loadBalancerHost, loadBalancerPort) log.With( - zap.String("nodeName", nodeName), - zap.String("providerID", providerID), - zap.String("nodeIP", nodeInternalIP), - zap.String("loadBalancerHost", loadBalancerHost), - zap.String("loadBalancerPort", loadBalancerPort), - ).Infof("Setting information for node") + slog.String("nodeName", nodeName), + slog.String("providerID", providerID), + slog.String("nodeIP", nodeInternalIP), + slog.String("loadBalancerHost", loadBalancerHost), + slog.String("loadBalancerPort", loadBalancerPort), + ).Info("Setting information for node") // Step 2: configure kubeadm join config ccmSupported := cloudprovider.FromString(k.cloudProvider) == cloudprovider.Azure || @@ -238,7 +237,7 @@ func (k *KubeWrapper) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTo if err != nil { return fmt.Errorf("encoding kubeadm join configuration as YAML: %w", err) } - log.With(zap.String("apiServerEndpoint", args.APIServerEndpoint)).Infof("Joining Kubernetes cluster") + log.With(slog.String("apiServerEndpoint", args.APIServerEndpoint)).Info("Joining Kubernetes cluster") if err := k.clusterUtil.JoinCluster(ctx, joinConfigYAML, log); err != nil { return fmt.Errorf("joining cluster: %v; %w ", string(joinConfigYAML), err) } diff --git a/bootstrapper/internal/kubernetes/kubernetes_test.go b/bootstrapper/internal/kubernetes/kubernetes_test.go index 4f179f549d..1b5284eb99 100644 --- a/bootstrapper/internal/kubernetes/kubernetes_test.go +++ b/bootstrapper/internal/kubernetes/kubernetes_test.go @@ -9,6 +9,7 @@ package kubernetes import ( "context" "errors" + "log/slog" "net" "strconv" "testing" @@ -187,7 +188,7 @@ func TestInitCluster(t *testing.T) { _, err := kube.InitCluster( context.Background(), string(tc.k8sVersion), "kubernetes", - false, nil, nil, "", logger.NewTest(t), + false, nil, nil, "", slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), ) if tc.wantErr { @@ -359,7 +360,7 @@ func TestJoinCluster(t *testing.T) { getIPAddr: func() (string, error) { return privateIP, nil }, } - err := kube.JoinCluster(context.Background(), joinCommand, tc.role, tc.k8sComponents, logger.NewTest(t)) + err := kube.JoinCluster(context.Background(), joinCommand, tc.role, tc.k8sComponents, slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil))) if tc.wantErr { assert.Error(err) return @@ -440,7 +441,7 @@ func (s *stubClusterUtil) InstallComponents(_ context.Context, _ components.Comp return s.installComponentsErr } -func (s *stubClusterUtil) InitCluster(_ context.Context, initConfig []byte, _, _ string, _ []net.IP, _ bool, _ *logger.Logger) ([]byte, error) { +func (s *stubClusterUtil) InitCluster(_ context.Context, initConfig []byte, _, _ string, _ []net.IP, _ bool, _ *slog.Logger) ([]byte, error) { s.initConfigs = append(s.initConfigs, initConfig) return 
s.kubeconfig, s.initClusterErr
 }
 
@@ -465,7 +466,7 @@ func (s *stubClusterUtil) SetupNodeOperator(_ context.Context, _ k8sapi.Client,
 	return s.setupNodeOperatorErr
 }
 
-func (s *stubClusterUtil) JoinCluster(_ context.Context, joinConfig []byte, _ *logger.Logger) error {
+func (s *stubClusterUtil) JoinCluster(_ context.Context, joinConfig []byte, _ *slog.Logger) error {
 	s.joinConfigs = append(s.joinConfigs, joinConfig)
 	return s.joinClusterErr
 }
diff --git a/cli/internal/cmd/apply.go b/cli/internal/cmd/apply.go
index 32eaedf211..34ff04a06c 100644
--- a/cli/internal/cmd/apply.go
+++ b/cli/internal/cmd/apply.go
@@ -212,7 +212,6 @@ func runApply(cmd *cobra.Command, _ []string) error {
 	if err != nil {
 		return fmt.Errorf("creating logger: %w", err)
 	}
-	defer log.Sync()
 	spinner, err := newSpinnerOrStderr(cmd)
 	if err != nil {
 		return err
@@ -396,7 +395,7 @@ func (a *applyCmd) apply(
 
 	// Apply Attestation Config
 	if !a.flags.skipPhases.contains(skipAttestationConfigPhase) {
-		a.log.Debugf("Applying new attestation config to cluster")
+		a.log.Debug("Applying new attestation config to cluster")
 		if err := a.applyJoinConfig(cmd, conf.GetAttestationConfig(), stateFile.ClusterValues.MeasurementSalt); err != nil {
 			return fmt.Errorf("applying attestation config: %w", err)
 		}
@@ -443,7 +442,7 @@ func (a *applyCmd) apply(
 
 func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationconfigapi.Fetcher) (*config.Config, *state.State, error) {
 	// Read user's config and state file
-	a.log.Debugf("Reading config from %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))
+	a.log.Debug(fmt.Sprintf("Reading config from %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename)))
 	conf, err := config.New(a.fileHandler, constants.ConfigFilename, configFetcher, a.flags.force)
 	var configValidationErr *config.ValidationError
 	if errors.As(err, &configValidationErr) {
@@ -453,7 +452,7 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc
 		return nil, nil, err
 	}
 
-	a.log.Debugf("Reading state file from %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename))
+	a.log.Debug(fmt.Sprintf("Reading state file from %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename)))
 	stateFile, err := state.CreateOrRead(a.fileHandler, constants.StateFilename)
 	if err != nil {
 		return nil, nil, err
@@ -473,7 +472,7 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc
 	// in which case the workspace has to be clean
 	if preCreateValidateErr == nil {
 		// We can't skip the infrastructure phase if no infrastructure has been defined
-		a.log.Debugf("State file is in pre-create state, checking workspace")
+		a.log.Debug("State file is in pre-create state, checking workspace")
 		if a.flags.skipPhases.contains(skipInfrastructurePhase) {
 			return nil, nil, preInitValidateErr
 		}
@@ -482,7 +481,7 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc
 			return nil, nil, err
 		}
 
-		a.log.Debugf("No Terraform state found in current working directory. Preparing to create a new cluster.")
+		a.log.Debug("No Terraform state found in current working directory. Preparing to create a new cluster.")
 		printCreateWarnings(cmd.ErrOrStderr(), conf)
 	}
 
@@ -491,7 +490,7 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc
 	// If so, we need to run the init RPC
 	if preInitValidateErr == nil || (preCreateValidateErr == nil && !a.flags.skipPhases.contains(skipInitPhase)) {
 		// We can't skip the init phase if the init RPC hasn't been run yet
-		a.log.Debugf("State file is in pre-init state, checking workspace")
+		a.log.Debug("State file is in pre-init state, checking workspace")
 		if a.flags.skipPhases.contains(skipInitPhase) {
 			return nil, nil, postInitValidateErr
 		}
@@ -507,7 +506,7 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc
 	// If the state file is in a post-init state,
 	// we need to make sure specific files exist in the workspace
 	if postInitValidateErr == nil {
-		a.log.Debugf("State file is in post-init state, checking workspace")
+		a.log.Debug("State file is in post-init state, checking workspace")
 		if err := a.checkPostInitFilesExist(); err != nil {
 			return nil, nil, err
 		}
@@ -522,16 +521,16 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc
 	// If we need to run the init RPC, the version has to be valid
 	// Otherwise, we are able to use an outdated version, meaning we skip the K8s upgrade
 	// We skip version validation if the user explicitly skips the Kubernetes phase
-	a.log.Debugf("Validating Kubernetes version %s", conf.KubernetesVersion)
+	a.log.Debug(fmt.Sprintf("Validating Kubernetes version %s", conf.KubernetesVersion))
 	validVersion, err := versions.NewValidK8sVersion(string(conf.KubernetesVersion), true)
 	if err != nil {
-		a.log.Debugf("Kubernetes version not valid: %s", err)
+		a.log.Debug(fmt.Sprintf("Kubernetes version not valid: %s", err))
 		if !a.flags.skipPhases.contains(skipInitPhase) {
 			return nil, nil, err
 		}
 
 		if !a.flags.skipPhases.contains(skipK8sPhase) {
-			a.log.Debugf("Checking if user wants to continue anyway")
+			a.log.Debug("Checking if user wants to continue anyway")
 			if !a.flags.yes {
 				confirmed, err := askToConfirm(cmd, fmt.Sprintf(
@@ -548,7 +547,7 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc
 			}
 
 			a.flags.skipPhases.add(skipK8sPhase)
-			a.log.Debugf("Outdated Kubernetes version accepted, Kubernetes upgrade will be skipped")
+			a.log.Debug("Outdated Kubernetes version accepted, Kubernetes upgrade will be skipped")
 		}
 
 		validVersionString, err := versions.ResolveK8sPatchVersion(xsemver.MajorMinor(string(conf.KubernetesVersion)))
@@ -564,7 +563,7 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc
 		cmd.PrintErrf("Warning: Constellation with Kubernetes %s is still in preview. Use only for evaluation purposes.\n", validVersion)
Use only for evaluation purposes.\n", validVersion) } conf.KubernetesVersion = validVersion - a.log.Debugf("Target Kubernetes version set to %s", conf.KubernetesVersion) + a.log.Debug("Target Kubernetes version set to %s", conf.KubernetesVersion) // Validate microservice version (helm versions) in the user's config matches the version of the CLI // This makes sure we catch potential errors early, not just after we already ran Terraform migrations or the init RPC @@ -592,9 +591,9 @@ func (a *applyCmd) applyJoinConfig(cmd *cobra.Command, newConfig config.Attestat ) error { clusterAttestationConfig, err := a.applier.GetClusterAttestationConfig(cmd.Context(), newConfig.GetVariant()) if err != nil { - a.log.Debugf("Getting cluster attestation config failed: %s", err) + a.log.Debug("Getting cluster attestation config failed: %s", err) if k8serrors.IsNotFound(err) { - a.log.Debugf("Creating new join config") + a.log.Debug("Creating new join config") return a.applier.ApplyJoinConfig(cmd.Context(), newConfig, measurementSalt) } return fmt.Errorf("getting cluster attestation config: %w", err) @@ -606,7 +605,7 @@ func (a *applyCmd) applyJoinConfig(cmd *cobra.Command, newConfig config.Attestat return fmt.Errorf("comparing attestation configs: %w", err) } if equal { - a.log.Debugf("Current attestation config is equal to the new config, nothing to do") + a.log.Debug("Current attestation config is equal to the new config, nothing to do") return nil } @@ -685,7 +684,7 @@ func (a *applyCmd) checkCreateFilesClean() error { if err := a.checkInitFilesClean(); err != nil { return err } - a.log.Debugf("Checking Terraform state") + a.log.Debug("Checking Terraform state") if _, err := a.fileHandler.Stat(constants.TerraformWorkingDir); err == nil { return fmt.Errorf( "terraform state %q already exists in working directory, run 'constellation terminate' before creating a new cluster", @@ -700,7 +699,7 @@ func (a *applyCmd) checkCreateFilesClean() error { // checkInitFilesClean ensures that the workspace is clean before running the init RPC. func (a *applyCmd) checkInitFilesClean() error { - a.log.Debugf("Checking admin configuration file") + a.log.Debug("Checking admin configuration file") if _, err := a.fileHandler.Stat(constants.AdminConfFilename); err == nil { return fmt.Errorf( "file %q already exists in working directory, run 'constellation terminate' before creating a new cluster", @@ -709,7 +708,7 @@ func (a *applyCmd) checkInitFilesClean() error { } else if !errors.Is(err, fs.ErrNotExist) { return fmt.Errorf("checking for %q: %w", a.flags.pathPrefixer.PrefixPrintablePath(constants.AdminConfFilename), err) } - a.log.Debugf("Checking master secrets file") + a.log.Debug("Checking master secrets file") if _, err := a.fileHandler.Stat(constants.MasterSecretFilename); err == nil { return fmt.Errorf( "file %q already exists in working directory. Constellation won't overwrite previous master secrets. Move it somewhere or delete it before creating a new cluster", @@ -807,18 +806,18 @@ type warnLogger struct { // Infof messages are reduced to debug messages, since we don't want // the extra info when using the CLI without setting the debug flag. -func (wl warnLogger) Infof(fmtStr string, args ...any) { - wl.log.Debugf(fmtStr, args...) +func (wl warnLogger) Info(fmtStr string, args ...any) { + wl.log.Debug(fmtStr, args...) } // Warnf prints a formatted warning from the validator. 
-func (wl warnLogger) Warnf(fmtStr string, args ...any) { +func (wl warnLogger) Warn(fmtStr string, args ...any) { wl.cmd.PrintErrf("Warning: %s\n", fmt.Sprintf(fmtStr, args...)) } type warnLog interface { - Warnf(format string, args ...any) - Infof(format string, args ...any) + Warn(format string, args ...any) + Info(format string, args ...any) } // applier is used to run the different phases of the apply command. diff --git a/cli/internal/cmd/apply_test.go b/cli/internal/cmd/apply_test.go index 55f0556697..52cfa1ca56 100644 --- a/cli/internal/cmd/apply_test.go +++ b/cli/internal/cmd/apply_test.go @@ -12,6 +12,7 @@ import ( "errors" "fmt" "io" + "log/slog" "path/filepath" "strings" "testing" @@ -196,7 +197,7 @@ func TestBackupHelmCharts(t *testing.T) { applier: &stubConstellApplier{ stubKubernetesUpgrader: tc.backupClient, }, - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), } err := a.backupHelmCharts(context.Background(), tc.helmApplier, tc.includesUpgrades, "") @@ -442,7 +443,7 @@ func TestValidateInputs(t *testing.T) { cmd.SetIn(bytes.NewBufferString(tc.stdin)) a := applyCmd{ - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), fileHandler: fileHandler, flags: tc.flags, } diff --git a/cli/internal/cmd/applyhelm.go b/cli/internal/cmd/applyhelm.go index 74e65ff5a0..596ce592e3 100644 --- a/cli/internal/cmd/applyhelm.go +++ b/cli/internal/cmd/applyhelm.go @@ -25,7 +25,7 @@ import ( // runHelmApply handles installing or upgrading helm charts for the cluster. func (a *applyCmd) runHelmApply(cmd *cobra.Command, conf *config.Config, stateFile *state.State, upgradeDir string, ) error { - a.log.Debugf("Installing or upgrading Helm charts") + a.log.Debug("Installing or upgrading Helm charts") var masterSecret uri.MasterSecret if err := a.fileHandler.ReadJSON(constants.MasterSecretFilename, &masterSecret); err != nil { return fmt.Errorf("reading master secret: %w", err) @@ -44,13 +44,13 @@ func (a *applyCmd) runHelmApply(cmd *cobra.Command, conf *config.Config, stateFi AllowDestructive: helm.DenyDestructive, } - a.log.Debugf("Getting service account URI") + a.log.Debug("Getting service account URI") serviceAccURI, err := cloudcmd.GetMarshaledServiceAccountURI(conf, a.fileHandler) if err != nil { return err } - a.log.Debugf("Preparing Helm charts") + a.log.Debug("Preparing Helm charts") executor, includesUpgrades, err := a.applier.PrepareHelmCharts(options, stateFile, serviceAccURI, masterSecret, conf.Provider.OpenStack) if errors.Is(err, helm.ErrConfirmationMissing) { if !a.flags.yes { @@ -75,12 +75,12 @@ func (a *applyCmd) runHelmApply(cmd *cobra.Command, conf *config.Config, stateFi cmd.PrintErrln(err) } - a.log.Debugf("Backing up Helm charts") + a.log.Debug("Backing up Helm charts") if err := a.backupHelmCharts(cmd.Context(), executor, includesUpgrades, upgradeDir); err != nil { return err } - a.log.Debugf("Applying Helm charts") + a.log.Debug("Applying Helm charts") if !a.flags.skipPhases.contains(skipInitPhase) { a.spinner.Start("Installing Kubernetes components ", false) } else { @@ -108,10 +108,10 @@ func (a *applyCmd) backupHelmCharts( if err := executor.SaveCharts(chartDir, a.fileHandler); err != nil { return fmt.Errorf("saving Helm charts to disk: %w", err) } - a.log.Debugf("Helm charts saved to %s", a.flags.pathPrefixer.PrefixPrintablePath(chartDir)) + a.log.Debug("Helm charts saved to %s", a.flags.pathPrefixer.PrefixPrintablePath(chartDir)) if includesUpgrades { - a.log.Debugf("Creating backup of CRDs 
and CRs") + a.log.Debug("Creating backup of CRDs and CRs") crds, err := a.applier.BackupCRDs(ctx, a.fileHandler, upgradeDir) if err != nil { return fmt.Errorf("creating CRD backup: %w", err) diff --git a/cli/internal/cmd/applyinit.go b/cli/internal/cmd/applyinit.go index 90fa77cdc9..36c6635dc0 100644 --- a/cli/internal/cmd/applyinit.go +++ b/cli/internal/cmd/applyinit.go @@ -29,13 +29,13 @@ import ( // On success, it writes the Kubernetes admin config file to disk. // Therefore it is skipped if the Kubernetes admin config file already exists. func (a *applyCmd) runInit(cmd *cobra.Command, conf *config.Config, stateFile *state.State) (*bytes.Buffer, error) { - a.log.Debugf("Creating aTLS Validator for %s", conf.GetAttestationConfig().GetVariant()) + a.log.Debug("Creating aTLS Validator for %s", conf.GetAttestationConfig().GetVariant()) validator, err := choose.Validator(conf.GetAttestationConfig(), a.wLog) if err != nil { return nil, fmt.Errorf("creating validator: %w", err) } - a.log.Debugf("Running init RPC") + a.log.Debug("Running init RPC") masterSecret, err := a.generateAndPersistMasterSecret(cmd.OutOrStdout()) if err != nil { return nil, fmt.Errorf("generating master secret: %w", err) @@ -74,9 +74,9 @@ func (a *applyCmd) runInit(cmd *cobra.Command, conf *config.Config, stateFile *s } return nil, err } - a.log.Debugf("Initialization request successful") + a.log.Debug("Initialization request successful") - a.log.Debugf("Buffering init success message") + a.log.Debug("Buffering init success message") bufferedOutput := &bytes.Buffer{} if err := a.writeInitOutput(stateFile, resp, a.flags.mergeConfigs, bufferedOutput, measurementSalt); err != nil { return nil, err @@ -121,7 +121,7 @@ func (a *applyCmd) writeInitOutput( if err := a.fileHandler.Write(constants.AdminConfFilename, initResp.Kubeconfig, file.OptNone); err != nil { return fmt.Errorf("writing kubeconfig: %w", err) } - a.log.Debugf("Kubeconfig written to %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.AdminConfFilename)) + a.log.Debug("Kubeconfig written to %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.AdminConfFilename)) if mergeConfig { if err := a.merger.mergeConfigs(constants.AdminConfFilename, a.fileHandler); err != nil { @@ -136,7 +136,7 @@ func (a *applyCmd) writeInitOutput( return fmt.Errorf("writing Constellation state file: %w", err) } - a.log.Debugf("Constellation state file written to %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename)) + a.log.Debug("Constellation state file written to %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename)) if !mergeConfig { fmt.Fprintln(wr, "You can now connect to your cluster by executing:") diff --git a/cli/internal/cmd/applyterraform.go b/cli/internal/cmd/applyterraform.go index 3dc0048b8e..f3541d2179 100644 --- a/cli/internal/cmd/applyterraform.go +++ b/cli/internal/cmd/applyterraform.go @@ -23,7 +23,7 @@ import ( // runTerraformApply checks if changes to Terraform are required and applies them. 
func (a *applyCmd) runTerraformApply(cmd *cobra.Command, conf *config.Config, stateFile *state.State, upgradeDir string) error { - a.log.Debugf("Checking if Terraform migrations are required") + a.log.Debug("Checking if Terraform migrations are required") terraformClient, removeClient, err := a.newInfraApplier(cmd.Context()) if err != nil { return fmt.Errorf("creating Terraform client: %w", err) @@ -39,18 +39,18 @@ func (a *applyCmd) runTerraformApply(cmd *cobra.Command, conf *config.Config, st if changesRequired, err := a.planTerraformChanges(cmd, conf, terraformClient); err != nil { return fmt.Errorf("planning Terraform migrations: %w", err) } else if !changesRequired { - a.log.Debugf("No changes to infrastructure required, skipping Terraform migrations") + a.log.Debug("No changes to infrastructure required, skipping Terraform migrations") return nil } - a.log.Debugf("Apply new Terraform resources for infrastructure changes") + a.log.Debug("Apply new Terraform resources for infrastructure changes") newInfraState, err := a.applyTerraformChanges(cmd, conf, terraformClient, upgradeDir, isNewCluster) if err != nil { return err } // Merge the original state with the new infrastructure values - a.log.Debugf("Updating state file with new infrastructure state") + a.log.Debug("Updating state file with new infrastructure state") if _, err := stateFile.Merge( // temporary state with new infrastructure values state.New().SetInfrastructure(newInfraState), @@ -68,7 +68,7 @@ func (a *applyCmd) runTerraformApply(cmd *cobra.Command, conf *config.Config, st // planTerraformChanges checks if any changes to the Terraform state are required. // If no state exists, this function will return true and the caller should create a new state. func (a *applyCmd) planTerraformChanges(cmd *cobra.Command, conf *config.Config, terraformClient cloudApplier) (bool, error) { - a.log.Debugf("Planning Terraform changes") + a.log.Debug("Planning Terraform changes") // Check if there are any Terraform changes to apply @@ -76,7 +76,7 @@ func (a *applyCmd) planTerraformChanges(cmd *cobra.Command, conf *config.Config, // // var manualMigrations []terraform.StateMigration // for _, migration := range manualMigrations { - // u.log.Debugf("Adding manual Terraform migration: %s", migration.DisplayName) + // u.log.Debug("Adding manual Terraform migration: %s", migration.DisplayName) // u.infraApplier.AddManualStateMigration(migration) // } @@ -146,7 +146,7 @@ func (a *applyCmd) applyTerraformChangesWithMessage( return state.Infrastructure{}, errors.New(abortErrorMsg) } } - a.log.Debugf("Applying Terraform changes") + a.log.Debug("Applying Terraform changes") a.spinner.Start(progressMsg, false) infraState, err := terraformClient.Apply(cmd.Context(), csp, attestation, rollbackBehavior) @@ -186,7 +186,7 @@ func printCreateInfo(out io.Writer, conf *config.Config, log debugLog) error { } } if len(otherGroupNames) > 0 { - log.Debugf("Creating %d additional node groups: %v", len(otherGroupNames), otherGroupNames) + log.Debug("Creating %d additional node groups: %v", len(otherGroupNames), otherGroupNames) } fmt.Fprintf(out, "The following Constellation cluster will be created:\n") diff --git a/cli/internal/cmd/configfetchmeasurements.go b/cli/internal/cmd/configfetchmeasurements.go index aaa5b1cf06..f09525dba9 100644 --- a/cli/internal/cmd/configfetchmeasurements.go +++ b/cli/internal/cmd/configfetchmeasurements.go @@ -93,7 +93,6 @@ func runConfigFetchMeasurements(cmd *cobra.Command, _ []string) error { if err != nil { return 
fmt.Errorf("creating logger: %w", err) } - defer log.Sync() fileHandler := file.NewHandler(afero.NewOsFs()) rekor, err := sigstore.NewRekor() if err != nil { @@ -105,7 +104,7 @@ func runConfigFetchMeasurements(cmd *cobra.Command, _ []string) error { if err := cfm.flags.parse(cmd.Flags()); err != nil { return fmt.Errorf("parsing flags: %w", err) } - cfm.log.Debugf("Using flags %+v", cfm.flags) + cfm.log.Debug("Using flags %+v", cfm.flags) fetcher := attestationconfigapi.NewFetcherWithClient(http.DefaultClient, constants.CDNRepositoryURL) return cfm.configFetchMeasurements(cmd, fileHandler, fetcher) @@ -119,7 +118,7 @@ func (cfm *configFetchMeasurementsCmd) configFetchMeasurements( return errors.New("fetching measurements is not supported") } - cfm.log.Debugf("Loading configuration file from %q", cfm.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename)) + cfm.log.Debug("Loading configuration file from %q", cfm.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename)) conf, err := config.New(fileHandler, constants.ConfigFilename, fetcher, cfm.flags.force) var configValidationErr *config.ValidationError @@ -134,11 +133,11 @@ func (cfm *configFetchMeasurementsCmd) configFetchMeasurements( cmd.PrintErrln("Configured image doesn't look like a released production image. Double check image before deploying to production.") } - cfm.log.Debugf("Creating context") + cfm.log.Debug("Creating context") ctx, cancel := context.WithTimeout(context.Background(), time.Minute) defer cancel() - cfm.log.Debugf("Updating URLs") + cfm.log.Debug("Updating URLs") if err := cfm.flags.updateURLs(conf); err != nil { return err } @@ -155,12 +154,12 @@ func (cfm *configFetchMeasurementsCmd) configFetchMeasurements( } cfm.log.Debugf("Measurements: %#v\n", fetchedMeasurements) - cfm.log.Debugf("Updating measurements in configuration") + cfm.log.Debug("Updating measurements in configuration") conf.UpdateMeasurements(fetchedMeasurements) if err := fileHandler.WriteYAML(constants.ConfigFilename, conf, file.OptOverwrite); err != nil { return err } - cfm.log.Debugf("Configuration written to %s", cfm.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename)) + cfm.log.Debug("Configuration written to %s", cfm.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename)) cmd.Print("Successfully fetched measurements and updated Configuration\n") return nil } diff --git a/cli/internal/cmd/configfetchmeasurements_test.go b/cli/internal/cmd/configfetchmeasurements_test.go index 9cebbb7dac..84c2bbd160 100644 --- a/cli/internal/cmd/configfetchmeasurements_test.go +++ b/cli/internal/cmd/configfetchmeasurements_test.go @@ -8,6 +8,7 @@ package cmd import ( "context" + "log/slog" "net/http" "net/url" "testing" @@ -180,7 +181,7 @@ func TestConfigFetchMeasurements(t *testing.T) { err := fileHandler.WriteYAML(constants.ConfigFilename, gcpConfig, file.OptMkdirAll) require.NoError(err) fetcher := stubVerifyFetcher{err: tc.err} - cfm := &configFetchMeasurementsCmd{canFetchMeasurements: true, log: logger.NewTest(t), verifyFetcher: fetcher} + cfm := &configFetchMeasurementsCmd{canFetchMeasurements: true, log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), verifyFetcher: fetcher} cfm.flags.insecure = tc.insecureFlag cfm.flags.force = true diff --git a/cli/internal/cmd/configgenerate_test.go b/cli/internal/cmd/configgenerate_test.go index d3ffbff189..c65173a901 100644 --- a/cli/internal/cmd/configgenerate_test.go +++ b/cli/internal/cmd/configgenerate_test.go @@ -8,6 +8,7 @@ package cmd import ( "fmt" + 
"log/slog" "strings" "testing" @@ -92,7 +93,7 @@ func TestConfigGenerateDefault(t *testing.T) { cmd := newConfigGenerateCmd() cg := &configGenerateCmd{ - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), flags: generateFlags{ attestationVariant: variant.Dummy{}, k8sVersion: versions.Default, @@ -144,7 +145,7 @@ func TestConfigGenerateDefaultProviderSpecific(t *testing.T) { wantConf.RemoveProviderAndAttestationExcept(tc.provider) cg := &configGenerateCmd{ - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), flags: generateFlags{ attestationVariant: variant.Dummy{}, k8sVersion: versions.Default, @@ -177,7 +178,7 @@ func TestConfigGenerateDefaultExists(t *testing.T) { cmd := newConfigGenerateCmd() cg := &configGenerateCmd{ - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), flags: generateFlags{attestationVariant: variant.Dummy{}}, } require.Error(cg.configGenerate(cmd, fileHandler, cloudprovider.Unknown, "")) diff --git a/cli/internal/cmd/create_test.go b/cli/internal/cmd/create_test.go index fe3119ee38..7eddbae57c 100644 --- a/cli/internal/cmd/create_test.go +++ b/cli/internal/cmd/create_test.go @@ -9,6 +9,7 @@ package cmd import ( "bytes" "context" + "log/slog" "testing" "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider" @@ -227,7 +228,7 @@ func TestCreate(t *testing.T) { skipPhases: newPhases(skipInitPhase, skipAttestationConfigPhase, skipCertSANsPhase, skipHelmPhase, skipImagePhase, skipK8sPhase), }, - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), spinner: &nopSpinner{}, newInfraApplier: func(_ context.Context) (cloudApplier, func(), error) { @@ -295,7 +296,7 @@ func TestCheckDirClean(t *testing.T) { for _, f := range tc.existingFiles { require.NoError(fh.Write(f, []byte{1, 2, 3}, file.OptNone)) } - a := &applyCmd{log: logger.NewTest(t), fileHandler: fh} + a := &applyCmd{log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), fileHandler: fh} err := a.checkInitFilesClean() if tc.wantErr { diff --git a/cli/internal/cmd/iamcreate.go b/cli/internal/cmd/iamcreate.go index 337bf5f863..17e4ff4ed6 100644 --- a/cli/internal/cmd/iamcreate.go +++ b/cli/internal/cmd/iamcreate.go @@ -82,7 +82,6 @@ func runIAMCreate(cmd *cobra.Command, providerCreator providerIAMCreator, provid if err != nil { return fmt.Errorf("creating logger: %w", err) } - defer log.Sync() iamCreator := &iamCreator{ cmd: cmd, @@ -134,7 +133,7 @@ func (c *iamCreator) create(ctx context.Context) error { var conf config.Config if c.flags.updateConfig { - c.log.Debugf("Parsing config %s", c.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename)) + c.log.Debug("Parsing config %s", c.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename)) if err := c.fileHandler.ReadYAML(constants.ConfigFilename, &conf); err != nil { return fmt.Errorf("error reading the configuration file: %w", err) } @@ -154,7 +153,7 @@ func (c *iamCreator) create(ctx context.Context) error { return err } c.cmd.Println() // Print empty line to separate after spinner ended. 
- c.log.Debugf("Successfully created the IAM cloud resources") + c.log.Debug("Successfully created the IAM cloud resources") err = c.providerCreator.parseAndWriteIDFile(iamFile, c.fileHandler) if err != nil { @@ -162,7 +161,7 @@ func (c *iamCreator) create(ctx context.Context) error { } if c.flags.updateConfig { - c.log.Debugf("Writing IAM configuration to %s", c.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename)) + c.log.Debug("Writing IAM configuration to %s", c.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename)) c.providerCreator.writeOutputValuesToConfig(&conf, iamFile) if err := c.fileHandler.WriteYAML(constants.ConfigFilename, conf, file.OptOverwrite); err != nil { return err diff --git a/cli/internal/cmd/iamcreate_test.go b/cli/internal/cmd/iamcreate_test.go index 19aba50dbf..71cd0d1c94 100644 --- a/cli/internal/cmd/iamcreate_test.go +++ b/cli/internal/cmd/iamcreate_test.go @@ -8,6 +8,7 @@ package cmd import ( "bytes" "encoding/base64" + "log/slog" "strings" "testing" @@ -209,7 +210,7 @@ func TestIAMCreateAWS(t *testing.T) { iamCreator := &iamCreator{ cmd: cmd, - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), spinner: &nopSpinner{}, creator: tc.creator, fileHandler: fileHandler, @@ -385,7 +386,7 @@ func TestIAMCreateAzure(t *testing.T) { iamCreator := &iamCreator{ cmd: cmd, - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), spinner: &nopSpinner{}, creator: tc.creator, fileHandler: fileHandler, @@ -576,7 +577,7 @@ func TestIAMCreateGCP(t *testing.T) { iamCreator := &iamCreator{ cmd: cmd, - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), spinner: &nopSpinner{}, creator: tc.creator, fileHandler: fileHandler, diff --git a/cli/internal/cmd/iamdestroy.go b/cli/internal/cmd/iamdestroy.go index 667218b810..5a4e3d8f63 100644 --- a/cli/internal/cmd/iamdestroy.go +++ b/cli/internal/cmd/iamdestroy.go @@ -58,7 +58,6 @@ func runIAMDestroy(cmd *cobra.Command, _ []string) error { if err != nil { return fmt.Errorf("creating logger: %w", err) } - defer log.Sync() spinner := newSpinner(cmd.ErrOrStderr()) destroyer := cloudcmd.NewIAMDestroyer() fsHandler := file.NewHandler(afero.NewOsFs()) @@ -78,25 +77,25 @@ type destroyCmd struct { func (c *destroyCmd) iamDestroy(cmd *cobra.Command, spinner spinnerInterf, destroyer iamDestroyer, fsHandler file.Handler) error { // check if there is a possibility that the cluster is still running by looking out for specific files - c.log.Debugf("Checking if %q exists", c.flags.pathPrefixer.PrefixPrintablePath(constants.AdminConfFilename)) + c.log.Debug("Checking if %q exists", c.flags.pathPrefixer.PrefixPrintablePath(constants.AdminConfFilename)) if _, err := fsHandler.Stat(constants.AdminConfFilename); !errors.Is(err, os.ErrNotExist) { return fmt.Errorf("file %q still exists, please make sure to terminate your cluster before destroying your IAM configuration", c.flags.pathPrefixer.PrefixPrintablePath(constants.AdminConfFilename)) } - c.log.Debugf("Checking if %q exists", c.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename)) + c.log.Debug("Checking if %q exists", c.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename)) if _, err := fsHandler.Stat(constants.StateFilename); !errors.Is(err, os.ErrNotExist) { return fmt.Errorf("file %q still exists, please make sure to terminate your cluster before destroying your IAM configuration", 
c.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename)) } gcpFileExists := false - c.log.Debugf("Checking if %q exists", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename)) + c.log.Debug("Checking if %q exists", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename)) if _, err := fsHandler.Stat(constants.GCPServiceAccountKeyFilename); err != nil { if !errors.Is(err, os.ErrNotExist) { return err } } else { - c.log.Debugf("%q exists", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename)) + c.log.Debug("%q exists", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename)) gcpFileExists = true } @@ -117,7 +116,7 @@ func (c *destroyCmd) iamDestroy(cmd *cobra.Command, spinner spinnerInterf, destr } if gcpFileExists { - c.log.Debugf("Starting to delete %q", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename)) + c.log.Debug("Starting to delete %q", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename)) proceed, err := c.deleteGCPServiceAccountKeyFile(cmd, destroyer, fsHandler) if err != nil { return err @@ -128,7 +127,7 @@ func (c *destroyCmd) iamDestroy(cmd *cobra.Command, spinner spinnerInterf, destr } } - c.log.Debugf("Starting to destroy IAM configuration") + c.log.Debug("Starting to destroy IAM configuration") spinner.Start("Destroying IAM configuration", false) defer spinner.Stop() @@ -144,18 +143,18 @@ func (c *destroyCmd) iamDestroy(cmd *cobra.Command, spinner spinnerInterf, destr func (c *destroyCmd) deleteGCPServiceAccountKeyFile(cmd *cobra.Command, destroyer iamDestroyer, fsHandler file.Handler) (bool, error) { var fileSaKey gcpshared.ServiceAccountKey - c.log.Debugf("Parsing %q", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename)) + c.log.Debug("Parsing %q", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename)) if err := fsHandler.ReadJSON(constants.GCPServiceAccountKeyFilename, &fileSaKey); err != nil { return false, err } - c.log.Debugf("Getting service account key from the tfstate") + c.log.Debug("Getting service account key from the tfstate") tfSaKey, err := destroyer.GetTfStateServiceAccountKey(cmd.Context(), constants.TerraformIAMWorkingDir) if err != nil { return false, err } - c.log.Debugf("Checking if keys are the same") + c.log.Debug("Checking if keys are the same") if tfSaKey != fileSaKey { cmd.Printf( "The key in %q don't match up with your Terraform state. 
%q will not be deleted.\n", @@ -169,6 +168,6 @@ func (c *destroyCmd) deleteGCPServiceAccountKeyFile(cmd *cobra.Command, destroye return false, err } - c.log.Debugf("Successfully deleted %q", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename)) + c.log.Debug("Successfully deleted %q", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename)) return true, nil } diff --git a/cli/internal/cmd/iamdestroy_test.go b/cli/internal/cmd/iamdestroy_test.go index e6dd4feb2e..8a1adc56b8 100644 --- a/cli/internal/cmd/iamdestroy_test.go +++ b/cli/internal/cmd/iamdestroy_test.go @@ -7,6 +7,7 @@ package cmd import ( "bytes" "errors" + "log/slog" "testing" "github.com/edgelesssys/constellation/v2/internal/cloud/gcpshared" @@ -106,7 +107,7 @@ func TestIAMDestroy(t *testing.T) { cmd.SetErr(&bytes.Buffer{}) cmd.SetIn(bytes.NewBufferString(tc.stdin)) - c := &destroyCmd{log: logger.NewTest(t), flags: iamDestroyFlags{ + c := &destroyCmd{log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), flags: iamDestroyFlags{ yes: tc.yesFlag, }} @@ -196,7 +197,7 @@ func TestDeleteGCPServiceAccountKeyFile(t *testing.T) { cmd.SetErr(&bytes.Buffer{}) cmd.SetIn(bytes.NewBufferString(tc.stdin)) - c := &destroyCmd{log: logger.NewTest(t)} + c := &destroyCmd{log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil))} proceed, err := c.deleteGCPServiceAccountKeyFile(cmd, tc.destroyer, tc.fsHandler) if tc.wantErr { diff --git a/cli/internal/cmd/iamupgradeapply.go b/cli/internal/cmd/iamupgradeapply.go index 7f1e98544d..0a3485d279 100644 --- a/cli/internal/cmd/iamupgradeapply.go +++ b/cli/internal/cmd/iamupgradeapply.go @@ -149,7 +149,7 @@ func (i iamUpgradeApplyCmd) iamUpgradeApply(cmd *cobra.Command, iamUpgrader iamU return errors.New("IAM upgrade aborted by user") } } - i.log.Debugf("Applying Terraform IAM migrations") + i.log.Debug("Applying Terraform IAM migrations") if err := iamUpgrader.ApplyIAMUpgrade(cmd.Context(), conf.GetProvider()); err != nil { return fmt.Errorf("applying terraform migrations: %w", err) } diff --git a/cli/internal/cmd/iamupgradeapply_test.go b/cli/internal/cmd/iamupgradeapply_test.go index e1d4c19ce9..affa738ec3 100644 --- a/cli/internal/cmd/iamupgradeapply_test.go +++ b/cli/internal/cmd/iamupgradeapply_test.go @@ -8,6 +8,7 @@ package cmd import ( "context" "io" + "log/slog" "path/filepath" "strings" "testing" @@ -131,7 +132,7 @@ func TestIamUpgradeApply(t *testing.T) { iamUpgradeApplyCmd := &iamUpgradeApplyCmd{ fileHandler: tc.fh, - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), configFetcher: tc.configFetcher, flags: iamUpgradeApplyFlags{ yes: tc.yesFlag, diff --git a/cli/internal/cmd/init.go b/cli/internal/cmd/init.go index 166195d492..157354b404 100644 --- a/cli/internal/cmd/init.go +++ b/cli/internal/cmd/init.go @@ -72,7 +72,7 @@ func (c *kubeconfigMerger) mergeConfigs(configPath string, fileHandler file.Hand clientcmd.RecommendedHomeFile, configPath, // our config should overwrite the default config } - c.log.Debugf("Kubeconfig file loading precedence: %v", loadingRules.Precedence) + c.log.Debug("Kubeconfig file loading precedence: %v", loadingRules.Precedence) // merge the kubeconfigs cfg, err := loadingRules.Load() @@ -82,7 +82,7 @@ func (c *kubeconfigMerger) mergeConfigs(configPath string, fileHandler file.Hand // Set the current context to the cluster we just created cfg.CurrentContext = constellConfig.CurrentContext - c.log.Debugf("Set current context to %s", cfg.CurrentContext) + 
c.log.Debug("Set current context to %s", cfg.CurrentContext) json, err := runtime.Encode(clientcodec.Codec, cfg) if err != nil { @@ -97,7 +97,7 @@ func (c *kubeconfigMerger) mergeConfigs(configPath string, fileHandler file.Hand if err := fileHandler.Write(clientcmd.RecommendedHomeFile, mergedKubeconfig, file.OptOverwrite); err != nil { return fmt.Errorf("writing merged kubeconfig to file: %w", err) } - c.log.Debugf("Merged kubeconfig into default config file: %s", clientcmd.RecommendedHomeFile) + c.log.Debug("Merged kubeconfig into default config file: %s", clientcmd.RecommendedHomeFile) return nil } diff --git a/cli/internal/cmd/init_test.go b/cli/internal/cmd/init_test.go index de6278d660..b0f5f8a4ed 100644 --- a/cli/internal/cmd/init_test.go +++ b/cli/internal/cmd/init_test.go @@ -13,6 +13,7 @@ import ( "strings" "testing" "time" + "log/slog" "github.com/edgelesssys/constellation/v2/bootstrapper/initproto" "github.com/edgelesssys/constellation/v2/cli/internal/cmd/pathprefix" @@ -228,7 +229,7 @@ func TestInitialize(t *testing.T) { rootFlags: rootFlags{force: true}, skipPhases: newPhases(skipInfrastructurePhase), }, - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), spinner: &nopSpinner{}, merger: &stubMerger{}, applier: &stubConstellApplier{ @@ -368,8 +369,8 @@ func TestWriteOutput(t *testing.T) { fileHandler: fileHandler, spinner: &nopSpinner{}, merger: &stubMerger{}, - log: logger.NewTest(t), - applier: constellation.NewApplier(logger.NewTest(t), &nopSpinner{}, constellation.ApplyContextCLI, nil), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), + applier: constellation.NewApplier(slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), &nopSpinner{}, constellation.ApplyContextCLI, nil), } err = i.writeInitOutput(stateFile, initOutput, false, &out, measurementSalt) require.NoError(err) @@ -460,8 +461,8 @@ func TestGenerateMasterSecret(t *testing.T) { var out bytes.Buffer i := &applyCmd{ fileHandler: fileHandler, - log: logger.NewTest(t), - applier: constellation.NewApplier(logger.NewTest(t), &nopSpinner{}, constellation.ApplyContextCLI, nil), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), + applier: constellation.NewApplier(slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), &nopSpinner{}, constellation.ApplyContextCLI, nil), } secret, err := i.generateAndPersistMasterSecret(&out) diff --git a/cli/internal/cmd/log.go b/cli/internal/cmd/log.go index 463e1f5b6d..cd0c6008db 100644 --- a/cli/internal/cmd/log.go +++ b/cli/internal/cmd/log.go @@ -7,25 +7,25 @@ SPDX-License-Identifier: AGPL-3.0-only package cmd import ( - "github.com/edgelesssys/constellation/v2/internal/logger" + "log/slog" + "os" + "github.com/spf13/cobra" - "go.uber.org/zap/zapcore" ) type debugLog interface { - Debugf(format string, args ...any) - Sync() + Debug(format string, args ...any) } func newCLILogger(cmd *cobra.Command) (debugLog, error) { - logLvl := zapcore.InfoLevel + logLvl := slog.LevelInfo debugLog, err := cmd.Flags().GetBool("debug") if err != nil { return nil, err } if debugLog { - logLvl = zapcore.DebugLevel + logLvl = slog.LevelDebug } - return logger.New(logger.PlainLog, logLvl), nil + return slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: logLvl})), nil } diff --git a/cli/internal/cmd/maapatch.go b/cli/internal/cmd/maapatch.go index 7db1abb5c5..f29aea2ce0 100644 --- a/cli/internal/cmd/maapatch.go +++ b/cli/internal/cmd/maapatch.go @@ -47,7 +47,6 @@ func runPatchMAA(cmd *cobra.Command, 
args []string) error { if err != nil { return fmt.Errorf("creating logger: %w", err) } - defer log.Sync() p := maa.NewAzurePolicyPatcher() @@ -57,7 +56,7 @@ func runPatchMAA(cmd *cobra.Command, args []string) error { } func (c *maaPatchCmd) patchMAA(cmd *cobra.Command, attestationURL string) error { - c.log.Debugf("Using attestation URL %s", attestationURL) + c.log.Debug("Using attestation URL %s", attestationURL) if err := c.patcher.Patch(cmd.Context(), attestationURL); err != nil { return fmt.Errorf("patching MAA attestation policy: %w", err) diff --git a/cli/internal/cmd/maapatch_test.go b/cli/internal/cmd/maapatch_test.go index bbd0e165fa..925a03e0b1 100644 --- a/cli/internal/cmd/maapatch_test.go +++ b/cli/internal/cmd/maapatch_test.go @@ -8,6 +8,7 @@ package cmd import ( "context" + "log/slog" "testing" "github.com/edgelesssys/constellation/v2/internal/logger" @@ -38,7 +39,7 @@ func TestMAAPatch(t *testing.T) { t.Run(name, func(t *testing.T) { require := require.New(t) - c := &maaPatchCmd{log: logger.NewTest(t), patcher: tc.patcher} + c := &maaPatchCmd{log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), patcher: tc.patcher} err := c.patchMAA(&cobra.Command{}, tc.attestationURL) if tc.wantErr { require.Error(err) diff --git a/cli/internal/cmd/miniup.go b/cli/internal/cmd/miniup.go index ffe254e908..dfd297d934 100644 --- a/cli/internal/cmd/miniup.go +++ b/cli/internal/cmd/miniup.go @@ -50,7 +50,6 @@ func runUp(cmd *cobra.Command, _ []string) error { if err != nil { return fmt.Errorf("creating logger: %w", err) } - defer log.Sync() m := &miniUpCmd{ log: log, @@ -152,7 +151,7 @@ func (m *miniUpCmd) prepareConfig(cmd *cobra.Command) (*config.Config, error) { if err != nil { return nil, fmt.Errorf("mini default config is invalid: %v", err) } - m.log.Debugf("Prepared configuration") + m.log.Debug("Prepared configuration") return config, m.fileHandler.WriteYAML(constants.ConfigFilename, config, file.OptOverwrite) } diff --git a/cli/internal/cmd/miniup_linux_amd64.go b/cli/internal/cmd/miniup_linux_amd64.go index a28fe1486f..c059851a12 100644 --- a/cli/internal/cmd/miniup_linux_amd64.go +++ b/cli/internal/cmd/miniup_linux_amd64.go @@ -32,12 +32,12 @@ func (m *miniUpCmd) checkSystemRequirements(out io.Writer) error { return fmt.Errorf("creation of a QEMU based Constellation is not supported for %s/%s, a linux/amd64 platform is required", runtime.GOOS, runtime.GOARCH) } - m.log.Debugf("Checked arch and os") + m.log.Debug("Checked arch and os") // check if /dev/kvm exists if _, err := os.Stat("/dev/kvm"); err != nil { return fmt.Errorf("unable to access KVM device: %w", err) } - m.log.Debugf("Checked that /dev/kvm exists") + m.log.Debug("Checked that /dev/kvm exists") // check CPU cores if runtime.NumCPU() < 4 { return fmt.Errorf("insufficient CPU cores: %d, at least 4 cores are required by MiniConstellation", runtime.NumCPU()) @@ -45,7 +45,7 @@ func (m *miniUpCmd) checkSystemRequirements(out io.Writer) error { if runtime.NumCPU() < 6 { fmt.Fprintf(out, "WARNING: Only %d CPU cores available. 
This may cause performance issues.\n", runtime.NumCPU()) } - m.log.Debugf("Checked CPU cores - there are %d", runtime.NumCPU()) + m.log.Debug("Checked CPU cores - there are %d", runtime.NumCPU()) // check memory f, err := os.Open("/proc/meminfo") @@ -63,7 +63,7 @@ func (m *miniUpCmd) checkSystemRequirements(out io.Writer) error { } } } - m.log.Debugf("Scanned for available memory") + m.log.Debug("Scanned for available memory") memGB := memKB / 1024 / 1024 if memGB < 4 { return fmt.Errorf("insufficient memory: %dGB, at least 4GB of memory are required by MiniConstellation", memGB) @@ -71,7 +71,7 @@ func (m *miniUpCmd) checkSystemRequirements(out io.Writer) error { if memGB < 6 { fmt.Fprintln(out, "WARNING: Less than 6GB of memory available. This may cause performance issues.") } - m.log.Debugf("Checked available memory, you have %dGB available", memGB) + m.log.Debug("Checked available memory, you have %dGB available", memGB) var stat unix.Statfs_t if err := unix.Statfs(".", &stat); err != nil { @@ -81,7 +81,7 @@ func (m *miniUpCmd) checkSystemRequirements(out io.Writer) error { if freeSpaceGB < 20 { return fmt.Errorf("insufficient disk space: %dGB, at least 20GB of disk space are required by MiniConstellation", freeSpaceGB) } - m.log.Debugf("Checked for free space available, you have %dGB available", freeSpaceGB) + m.log.Debug("Checked for free space available, you have %dGB available", freeSpaceGB) return nil } diff --git a/cli/internal/cmd/recover.go b/cli/internal/cmd/recover.go index d66aa69b3a..4084ad11db 100644 --- a/cli/internal/cmd/recover.go +++ b/cli/internal/cmd/recover.go @@ -76,7 +76,6 @@ func runRecover(cmd *cobra.Command, _ []string) error { if err != nil { return fmt.Errorf("creating logger: %w", err) } - defer log.Sync() fileHandler := file.NewHandler(afero.NewOsFs()) newDialer := func(validator atls.Validator) *dialer.Dialer { return dialer.New(nil, validator, &net.Dialer{}) @@ -85,7 +84,7 @@ func runRecover(cmd *cobra.Command, _ []string) error { if err := r.flags.parse(cmd.Flags()); err != nil { return err } - r.log.Debugf("Using flags: %+v", r.flags) + r.log.Debug("Using flags: %+v", r.flags) return r.recover(cmd, fileHandler, 5*time.Second, &recoverDoer{log: r.log}, newDialer) } @@ -94,12 +93,12 @@ func (r *recoverCmd) recover( doer recoverDoerInterface, newDialer func(validator atls.Validator) *dialer.Dialer, ) error { var masterSecret uri.MasterSecret - r.log.Debugf("Loading master secret file from %s", r.flags.pathPrefixer.PrefixPrintablePath(constants.MasterSecretFilename)) + r.log.Debug("Loading master secret file from %s", r.flags.pathPrefixer.PrefixPrintablePath(constants.MasterSecretFilename)) if err := fileHandler.ReadJSON(constants.MasterSecretFilename, &masterSecret); err != nil { return err } - r.log.Debugf("Loading configuration file from %q", r.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename)) + r.log.Debug("Loading configuration file from %q", r.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename)) conf, err := config.New(fileHandler, constants.ConfigFilename, r.configFetcher, r.flags.force) var configValidationErr *config.ValidationError if errors.As(err, &configValidationErr) { @@ -130,16 +129,16 @@ func (r *recoverCmd) recover( conf.UpdateMAAURL(stateFile.Infrastructure.Azure.AttestationURL) } - r.log.Debugf("Creating aTLS Validator for %s", conf.GetAttestationConfig().GetVariant()) + r.log.Debug("Creating aTLS Validator for %s", conf.GetAttestationConfig().GetVariant()) validator, err := 
choose.Validator(conf.GetAttestationConfig(), warnLogger{cmd: cmd, log: r.log}) if err != nil { return fmt.Errorf("creating new validator: %w", err) } - r.log.Debugf("Created a new validator") + r.log.Debug("Created a new validator") doer.setDialer(newDialer(validator), endpoint) - r.log.Debugf("Set dialer for endpoint %s", endpoint) + r.log.Debug("Set dialer for endpoint %s", endpoint) doer.setURIs(masterSecret.EncodeToURI(), uri.NoStoreURI) - r.log.Debugf("Set secrets") + r.log.Debug("Set secrets") if err := r.recoverCall(cmd.Context(), cmd.OutOrStdout(), interval, doer); err != nil { if grpcRetry.ServiceIsUnavailable(err) { return nil @@ -167,12 +166,12 @@ func (r *recoverCmd) recoverCall(ctx context.Context, out io.Writer, interval ti }) } - r.log.Debugf("Encountered error (retriable: %t): %s", retry, err) + r.log.Debug("Encountered error (retriable: %t): %s", retry, err) return retry } retrier := retry.NewIntervalRetrier(doer, interval, retryOnceOnFailure) - r.log.Debugf("Created new interval retrier") + r.log.Debug("Created new interval retrier") err = retrier.Do(ctx) if err != nil { break @@ -180,7 +179,7 @@ func (r *recoverCmd) recoverCall(ctx context.Context, out io.Writer, interval ti fmt.Fprintln(out, "Pushed recovery key.") ctr++ } - r.log.Debugf("Retry counter is %d", ctr) + r.log.Debug("Retry counter is %d", ctr) if ctr > 0 { fmt.Fprintf(out, "Recovered %d control-plane nodes.\n", ctr) } else if grpcRetry.ServiceIsUnavailable(err) { @@ -222,11 +221,11 @@ func (d *recoverDoer) Do(ctx context.Context) (retErr error) { if err != nil { return fmt.Errorf("dialing recovery server: %w", err) } - d.log.Debugf("Dialed recovery server") + d.log.Debug("Dialed recovery server") defer conn.Close() protoClient := recoverproto.NewAPIClient(conn) - d.log.Debugf("Created protoClient") + d.log.Debug("Created protoClient") req := &recoverproto.RecoverMessage{ KmsUri: d.kmsURI, @@ -238,7 +237,7 @@ func (d *recoverDoer) Do(ctx context.Context) (retErr error) { return fmt.Errorf("calling recover: %w", err) } - d.log.Debugf("Received confirmation") + d.log.Debug("Received confirmation") return nil } diff --git a/cli/internal/cmd/recover_test.go b/cli/internal/cmd/recover_test.go index 41ca89817c..76799b2eb4 100644 --- a/cli/internal/cmd/recover_test.go +++ b/cli/internal/cmd/recover_test.go @@ -10,6 +10,7 @@ import ( "bytes" "context" "errors" + "log/slog" "net" "strconv" "testing" @@ -164,7 +165,7 @@ func TestRecover(t *testing.T) { newDialer := func(atls.Validator) *dialer.Dialer { return nil } r := &recoverCmd{ - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), configFetcher: stubAttestationFetcher{}, flags: recoverFlags{ rootFlags: rootFlags{force: true}, @@ -218,7 +219,7 @@ func TestDoRecovery(t *testing.T) { go recoverServer.Serve(listener) defer recoverServer.GracefulStop() - r := &recoverCmd{log: logger.NewTest(t)} + r := &recoverCmd{log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil))} recoverDoer := &recoverDoer{ dialer: dialer.New(nil, nil, netDialer), endpoint: addr, diff --git a/cli/internal/cmd/status.go b/cli/internal/cmd/status.go index 73f24ffbac..c2e83ef3ac 100644 --- a/cli/internal/cmd/status.go +++ b/cli/internal/cmd/status.go @@ -43,7 +43,6 @@ func runStatus(cmd *cobra.Command, _ []string) error { if err != nil { return fmt.Errorf("creating logger: %w", err) } - defer log.Sync() fileHandler := file.NewHandler(afero.NewOsFs()) diff --git a/cli/internal/cmd/terminate_test.go b/cli/internal/cmd/terminate_test.go index 
e2888d4b0a..282d241d27 100644 --- a/cli/internal/cmd/terminate_test.go +++ b/cli/internal/cmd/terminate_test.go @@ -9,6 +9,7 @@ package cmd import ( "bytes" "errors" + "log/slog" "testing" "github.com/edgelesssys/constellation/v2/internal/constants" @@ -139,7 +140,7 @@ func TestTerminate(t *testing.T) { fileHandler := file.NewHandler(tc.setupFs(require, tc.stateFile)) tCmd := &terminateCmd{ - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), fileHandler: fileHandler, flags: terminateFlags{ yes: tc.yesFlag, diff --git a/cli/internal/cmd/upgradeapply_test.go b/cli/internal/cmd/upgradeapply_test.go index 488a512d51..e074dc2298 100644 --- a/cli/internal/cmd/upgradeapply_test.go +++ b/cli/internal/cmd/upgradeapply_test.go @@ -9,6 +9,7 @@ package cmd import ( "bytes" "context" + "log/slog" "testing" "github.com/edgelesssys/constellation/v2/cli/internal/cloudcmd" @@ -251,7 +252,7 @@ func TestUpgradeApply(t *testing.T) { upgrader := &applyCmd{ fileHandler: fh, flags: tc.flags, - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), spinner: &nopSpinner{}, merger: &stubMerger{}, newInfraApplier: func(ctx context.Context) (cloudApplier, func(), error) { diff --git a/cli/internal/cmd/upgradecheck.go b/cli/internal/cmd/upgradecheck.go index 4916480747..e1c723b6e6 100644 --- a/cli/internal/cmd/upgradecheck.go +++ b/cli/internal/cmd/upgradecheck.go @@ -92,7 +92,6 @@ func runUpgradeCheck(cmd *cobra.Command, _ []string) error { if err != nil { return fmt.Errorf("creating logger: %w", err) } - defer log.Sync() var flags upgradeCheckFlags if err := flags.parse(cmd.Flags()); err != nil { @@ -188,7 +187,7 @@ func (u *upgradeCheckCmd) upgradeCheck(cmd *cobra.Command, fetcher attestationco // get current image version of the cluster csp := conf.GetProvider() attestationVariant := conf.GetAttestationConfig().GetVariant() - u.log.Debugf("Using provider %s with attestation variant %s", csp.String(), attestationVariant.String()) + u.log.Debug("Using provider %s with attestation variant %s", csp.String(), attestationVariant.String()) current, err := u.collect.currentVersions(cmd.Context()) if err != nil { @@ -199,18 +198,18 @@ func (u *upgradeCheckCmd) upgradeCheck(cmd *cobra.Command, fetcher attestationco if err != nil { return err } - u.log.Debugf("Current cli version: %s", current.cli) - u.log.Debugf("Supported cli version(s): %s", supported.cli) - u.log.Debugf("Current service version: %s", current.service) - u.log.Debugf("Supported service version: %s", supported.service) - u.log.Debugf("Current k8s version: %s", current.k8s) - u.log.Debugf("Supported k8s version(s): %s", supported.k8s) + u.log.Debug("Current cli version: %s", current.cli) + u.log.Debug("Supported cli version(s): %s", supported.cli) + u.log.Debug("Current service version: %s", current.service) + u.log.Debug("Supported service version: %s", supported.service) + u.log.Debug("Current k8s version: %s", current.k8s) + u.log.Debug("Supported k8s version(s): %s", supported.k8s) // Filter versions to only include upgrades newServices := supported.service if err := supported.service.IsUpgradeTo(current.service); err != nil { newServices = consemver.Semver{} - u.log.Debugf("No valid service upgrades are available from %q to %q. The minor version can only drift by 1.\n", current.service.String(), supported.service.String()) + u.log.Debug("No valid service upgrades are available from %q to %q. 
The minor version can only drift by 1.\n", current.service.String(), supported.service.String()) } newKubernetes := filterK8sUpgrades(current.k8s, supported.k8s) @@ -222,13 +221,13 @@ func (u *upgradeCheckCmd) upgradeCheck(cmd *cobra.Command, fetcher attestationco return err } - u.log.Debugf("Planning Terraform migrations") + u.log.Debug("Planning Terraform migrations") // Add manual migrations here if required // // var manualMigrations []terraform.StateMigration // for _, migration := range manualMigrations { - // u.log.Debugf("Adding manual Terraform migration: %s", migration.DisplayName) + // u.log.Debug("Adding manual Terraform migration: %s", migration.DisplayName) // u.terraformChecker.AddManualStateMigration(migration) // } cmd.Println("The following Terraform migrations are available with this CLI:") @@ -344,7 +343,7 @@ func (v *versionCollector) newMeasurements(ctx context.Context, csp cloudprovide // get expected measurements for each image upgrades := make(map[string]measurements.M) for _, version := range versions { - v.log.Debugf("Fetching measurements for image: %s", version) + v.log.Debug("Fetching measurements for image: %s", version) shortPath := version.ShortPath() publicKey, err := keyselect.CosignPublicKeyForVersion(version) @@ -365,7 +364,7 @@ func (v *versionCollector) newMeasurements(ctx context.Context, csp cloudprovide } upgrades[shortPath] = measurements } - v.log.Debugf("Compatible image measurements are %v", upgrades) + v.log.Debug("Compatible image measurements are %v", upgrades) return upgrades, nil } @@ -453,9 +452,9 @@ func (v *versionCollector) newImages(ctx context.Context, currentImageVersion co if err != nil { return nil, fmt.Errorf("calculating next image minor version: %w", err) } - v.log.Debugf("Current image minor version is %s", currentImageMinorVer) - v.log.Debugf("Current CLI minor version is %s", currentCLIMinorVer) - v.log.Debugf("Next image minor version is %s", nextImageMinorVer) + v.log.Debug("Current image minor version is %s", currentImageMinorVer) + v.log.Debug("Current CLI minor version is %s", currentCLIMinorVer) + v.log.Debug("Next image minor version is %s", nextImageMinorVer) allowedMinorVersions := []string{currentImageMinorVer, nextImageMinorVer} switch cliImageCompare := semver.Compare(currentCLIMinorVer, currentImageMinorVer); { @@ -471,7 +470,7 @@ func (v *versionCollector) newImages(ctx context.Context, currentImageVersion co case cliImageCompare > 0: allowedMinorVersions = []string{currentImageMinorVer, nextImageMinorVer} } - v.log.Debugf("Allowed minor versions are %#v", allowedMinorVersions) + v.log.Debug("Allowed minor versions are %#v", allowedMinorVersions) newerImages, err := v.newerVersions(ctx, allowedMinorVersions) if err != nil { @@ -494,7 +493,7 @@ func (v *versionCollector) newerVersions(ctx context.Context, allowedVersions [] patchList, err := v.verListFetcher.FetchVersionList(ctx, patchList) var notFound *fetcher.NotFoundError if errors.As(err, ¬Found) { - v.log.Debugf("Skipping version: %s", err) + v.log.Debug("Skipping version: %s", err) continue } if err != nil { @@ -502,7 +501,7 @@ func (v *versionCollector) newerVersions(ctx context.Context, allowedVersions [] } updateCandidates = append(updateCandidates, patchList.StructuredVersions()...) 
} - v.log.Debugf("Update candidates are %v", updateCandidates) + v.log.Debug("Update candidates are %v", updateCandidates) return updateCandidates, nil } @@ -604,7 +603,7 @@ func getCompatibleImageMeasurements(ctx context.Context, writer io.Writer, clien } var fetchedMeasurements measurements.M - log.Debugf("Fetching for measurement url: %s", measurementsURL) + log.Debug("Fetching for measurement url: %s", measurementsURL) hash, err := fetchedMeasurements.FetchAndVerify( ctx, client, cosign, @@ -658,7 +657,7 @@ func (v *versionCollector) newCLIVersions(ctx context.Context) ([]consemver.Semv return nil, fmt.Errorf("parsing version %s: %w", version, err) } if err := target.IsUpgradeTo(v.cliVersion); err != nil { - v.log.Debugf("Skipping incompatible minor version %q: %s", version, err) + v.log.Debug("Skipping incompatible minor version %q: %s", version, err) continue } list := versionsapi.List{ @@ -692,7 +691,7 @@ func (v *versionCollector) filterCompatibleCLIVersions(ctx context.Context, cliP var compatibleVersions []consemver.Semver for _, version := range cliPatchVersions { if err := version.IsUpgradeTo(v.cliVersion); err != nil { - v.log.Debugf("Skipping incompatible patch version %q: %s", version, err) + v.log.Debug("Skipping incompatible patch version %q: %s", version, err) continue } req := versionsapi.CLIInfo{ diff --git a/cli/internal/cmd/upgradecheck_test.go b/cli/internal/cmd/upgradecheck_test.go index 5e6f8329ab..8c334afdd6 100644 --- a/cli/internal/cmd/upgradecheck_test.go +++ b/cli/internal/cmd/upgradecheck_test.go @@ -11,6 +11,7 @@ import ( "context" "errors" "io" + "log/slog" "net/http" "strings" "testing" @@ -139,7 +140,7 @@ func TestGetCompatibleImageMeasurements(t *testing.T) { } }) - upgrades, err := getCompatibleImageMeasurements(context.Background(), &bytes.Buffer{}, client, &stubCosignVerifier{}, singleUUIDVerifier(), csp, attestationVariant, versionZero, logger.NewTest(t)) + upgrades, err := getCompatibleImageMeasurements(context.Background(), &bytes.Buffer{}, client, &stubCosignVerifier{}, singleUUIDVerifier(), csp, attestationVariant, versionZero, slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil))) assert.NoError(err) for _, measurement := range upgrades { @@ -215,7 +216,7 @@ func TestUpgradeCheck(t *testing.T) { collect: &tc.collector, terraformChecker: tc.checker, fileHandler: fileHandler, - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), } cmd := newUpgradeCheckCmd() diff --git a/cli/internal/cmd/verify.go b/cli/internal/cmd/verify.go index cd734e5632..3204b9236b 100644 --- a/cli/internal/cmd/verify.go +++ b/cli/internal/cmd/verify.go @@ -100,7 +100,6 @@ func runVerify(cmd *cobra.Command, _ []string) error { if err != nil { return fmt.Errorf("creating logger: %w", err) } - defer log.Sync() fileHandler := file.NewHandler(afero.NewOsFs()) verifyClient := &constellationVerifier{ @@ -129,7 +128,7 @@ func runVerify(cmd *cobra.Command, _ []string) error { if err := v.flags.parse(cmd.Flags()); err != nil { return err } - v.log.Debugf("Using flags: %+v", v.flags) + v.log.Debug("Using flags: %+v", v.flags) fetcher := attestationconfigapi.NewFetcher() return v.verify(cmd, verifyClient, formatterFactory, fetcher) } @@ -137,7 +136,7 @@ func runVerify(cmd *cobra.Command, _ []string) error { type formatterFactory func(output string, attestation variant.Variant, log debugLog) (attestationDocFormatter, error) func (c *verifyCmd) verify(cmd *cobra.Command, verifyClient verifyClient, factory formatterFactory, configFetcher 
attestationconfigapi.Fetcher) error { - c.log.Debugf("Loading configuration file from %q", c.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename)) + c.log.Debug("Loading configuration file from %q", c.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename)) conf, err := config.New(c.fileHandler, constants.ConfigFilename, configFetcher, c.flags.force) var configValidationErr *config.ValidationError if errors.As(err, &configValidationErr) { @@ -170,13 +169,13 @@ func (c *verifyCmd) verify(cmd *cobra.Command, verifyClient verifyClient, factor } conf.UpdateMAAURL(maaURL) - c.log.Debugf("Updating expected PCRs") + c.log.Debug("Updating expected PCRs") attConfig := conf.GetAttestationConfig() if err := updateInitMeasurements(attConfig, ownerID, clusterID); err != nil { return fmt.Errorf("updating expected PCRs: %w", err) } - c.log.Debugf("Creating aTLS Validator for %s", conf.GetAttestationConfig().GetVariant()) + c.log.Debug("Creating aTLS Validator for %s", conf.GetAttestationConfig().GetVariant()) validator, err := choose.Validator(attConfig, warnLogger{cmd: cmd, log: c.log}) if err != nil { return fmt.Errorf("creating aTLS validator: %w", err) @@ -186,7 +185,7 @@ func (c *verifyCmd) verify(cmd *cobra.Command, verifyClient verifyClient, factor if err != nil { return fmt.Errorf("generating random nonce: %w", err) } - c.log.Debugf("Generated random nonce: %x", nonce) + c.log.Debug("Generated random nonce: %x", nonce) rawAttestationDoc, err := verifyClient.Verify( cmd.Context(), @@ -385,7 +384,7 @@ type constellationVerifier struct { func (v *constellationVerifier) Verify( ctx context.Context, endpoint string, req *verifyproto.GetAttestationRequest, validator atls.Validator, ) (string, error) { - v.log.Debugf("Dialing endpoint: %q", endpoint) + v.log.Debug("Dialing endpoint: %q", endpoint) conn, err := v.dialer.DialInsecure(ctx, endpoint) if err != nil { return "", fmt.Errorf("dialing init server: %w", err) @@ -394,13 +393,13 @@ func (v *constellationVerifier) Verify( client := verifyproto.NewAPIClient(conn) - v.log.Debugf("Sending attestation request") + v.log.Debug("Sending attestation request") resp, err := client.GetAttestation(ctx, req) if err != nil { return "", fmt.Errorf("getting attestation: %w", err) } - v.log.Debugf("Verifying attestation") + v.log.Debug("Verifying attestation") signedData, err := validator.Validate(ctx, resp.Attestation, req.Nonce) if err != nil { return "", fmt.Errorf("validating attestation: %w", err) diff --git a/cli/internal/cmd/verify_test.go b/cli/internal/cmd/verify_test.go index a695a7c2f1..cb659573ec 100644 --- a/cli/internal/cmd/verify_test.go +++ b/cli/internal/cmd/verify_test.go @@ -18,6 +18,7 @@ import ( "strconv" "strings" "testing" + "log/slog" "github.com/edgelesssys/constellation/v2/internal/atls" "github.com/edgelesssys/constellation/v2/internal/attestation/measurements" @@ -210,7 +211,7 @@ func TestVerify(t *testing.T) { v := &verifyCmd{ fileHandler: fileHandler, - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), flags: verifyFlags{ clusterID: tc.clusterIDFlag, endpoint: tc.nodeEndpointFlag, @@ -242,7 +243,7 @@ func (f *stubAttDocFormatter) format(_ context.Context, _ string, _ bool, _ conf func TestFormat(t *testing.T) { formatter := func() *defaultAttestationDocFormatter { return &defaultAttestationDocFormatter{ - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), } } @@ -333,7 +334,7 @@ func TestVerifyClient(t *testing.T) { go 
verifyServer.Serve(listener) defer verifyServer.GracefulStop() - verifier := &constellationVerifier{dialer: dialer, log: logger.NewTest(t)} + verifier := &constellationVerifier{dialer: dialer, log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil))} request := &verifyproto.GetAttestationRequest{ Nonce: tc.nonce, } diff --git a/debugd/cmd/debugd/debugd.go b/debugd/cmd/debugd/debugd.go index 92f7f16146..2ebb27be4c 100644 --- a/debugd/cmd/debugd/debugd.go +++ b/debugd/cmd/debugd/debugd.go @@ -10,12 +10,12 @@ import ( "context" "flag" "fmt" + "log/slog" "net" "os" "sync" "github.com/spf13/afero" - "go.uber.org/zap" "github.com/edgelesssys/constellation/v2/debugd/internal/debugd/deploy" "github.com/edgelesssys/constellation/v2/debugd/internal/debugd/info" @@ -46,11 +46,11 @@ func main() { verbosity := flag.Int("v", 0, logger.CmdLineVerbosityDescription) flag.Parse() - log := logger.New(logger.JSONLog, logger.VerbosityFromInt(*verbosity)) + log := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: logger.VerbosityFromInt(*verbosity)})) fs := afero.NewOsFs() streamer := streamer.New(fs) - filetransferer := filetransfer.New(log.Named("filetransfer"), streamer, filetransfer.DontShowProgress) - serviceManager := deploy.NewServiceManager(log.Named("serviceManager")) + filetransferer := filetransfer.New(log.WithGroup("filetransfer"), streamer, filetransfer.DontShowProgress) + serviceManager := deploy.NewServiceManager(log.WithGroup("serviceManager")) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -64,21 +64,24 @@ func main() { case platform.AWS: meta, err := awscloud.New(ctx) if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to initialize AWS metadata") + log.With(slog.Any("error", err)).Error("Failed to initialize AWS metadata") + os.Exit(1) } fetcher = cloudprovider.New(meta) case platform.Azure: meta, err := azurecloud.New(ctx) if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to initialize Azure metadata") + log.With(slog.Any("error", err)).Error("Failed to initialize Azure metadata") + os.Exit(1) } fetcher = cloudprovider.New(meta) case platform.GCP: meta, err := gcpcloud.New(ctx) if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to initialize GCP metadata") + log.With(slog.Any("error", err)).Error("Failed to initialize GCP metadata") + os.Exit(1) } defer meta.Close() fetcher = cloudprovider.New(meta) @@ -86,26 +89,27 @@ func main() { case platform.OpenStack: meta, err := openstackcloud.New(ctx) if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to initialize OpenStack metadata") + log.With(slog.Any("error", err)).Error("Failed to initialize OpenStack metadata") + os.Exit(1) } fetcher = cloudprovider.New(meta) case platform.QEMU: fetcher = cloudprovider.New(qemucloud.New()) default: - log.Errorf("Unknown / unimplemented cloud provider CONSTEL_CSP=%v. Using fallback", csp) + log.Error("Unknown / unimplemented cloud provider CONSTEL_CSP=%v. 
Using fallback", csp) fetcher = fallback.NewFallbackFetcher() } infoMap := info.NewMap() infoMap.RegisterOnReceiveTrigger( - logcollector.NewStartTrigger(ctx, wg, platform.FromString(csp), fetcher, log.Named("logcollector")), + logcollector.NewStartTrigger(ctx, wg, platform.FromString(csp), fetcher, log.WithGroup("logcollector")), ) - download := deploy.New(log.Named("download"), &net.Dialer{}, serviceManager, filetransferer, infoMap) + download := deploy.New(log.WithGroup("download"), &net.Dialer{}, serviceManager, filetransferer, infoMap) - sched := metadata.NewScheduler(log.Named("scheduler"), fetcher, download) - serv := server.New(log.Named("server"), serviceManager, filetransferer, infoMap) + sched := metadata.NewScheduler(log.WithGroup("scheduler"), fetcher, download) + serv := server.New(log.WithGroup("server"), serviceManager, filetransferer, infoMap) writeDebugBanner(log) @@ -114,14 +118,14 @@ func main() { wg.Wait() } -func writeDebugBanner(log *logger.Logger) { +func writeDebugBanner(log *slog.Logger) { tty, err := os.OpenFile("/dev/ttyS0", os.O_WRONLY, os.ModeAppend) if err != nil { - log.With(zap.Error(err)).Errorf("Unable to open /dev/ttyS0 for printing banner") + log.With(slog.Any("error", err)).Error("Unable to open /dev/ttyS0 for printing banner") return } defer tty.Close() if _, err := fmt.Fprint(tty, debugBanner); err != nil { - log.With(zap.Error(err)).Errorf("Unable to print to /dev/ttyS0") + log.With(slog.Any("error", err)).Error("Unable to print to /dev/ttyS0") } } diff --git a/debugd/internal/debugd/deploy/download.go b/debugd/internal/debugd/deploy/download.go index b4000ff2cd..2c6d75ee45 100644 --- a/debugd/internal/debugd/deploy/download.go +++ b/debugd/internal/debugd/deploy/download.go @@ -11,21 +11,20 @@ import ( "errors" "fmt" "io" + "log/slog" "net" "strconv" "github.com/edgelesssys/constellation/v2/debugd/internal/filetransfer" pb "github.com/edgelesssys/constellation/v2/debugd/service" "github.com/edgelesssys/constellation/v2/internal/constants" - "github.com/edgelesssys/constellation/v2/internal/logger" - "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" ) // Download downloads a bootstrapper from a given debugd instance. type Download struct { - log *logger.Logger + log *slog.Logger dialer NetDialer transfer fileTransferer serviceManager serviceManager @@ -33,7 +32,7 @@ type Download struct { } // New creates a new Download. 
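
A note on the call semantics this series migrates to: slog's Debug/Info/Warn/Error take a constant message plus alternating key/value arguments, with no printf interpretation, so format verbs that survive a mechanical Debugf→Debug rewrite are emitted literally and stray arguments surface as !BADKEY attributes. A minimal, self-contained sketch of the two idiomatic forms (all values are placeholders):

    package main

    import (
    	"errors"
    	"fmt"
    	"log/slog"
    	"os"
    )

    func main() {
    	log := slog.New(slog.NewJSONHandler(os.Stdout, nil))
    	ip, err := "203.0.113.1", errors.New("connection refused")

    	// Format verbs must be rendered up front ...
    	log.Info(fmt.Sprintf("Trying to download files from %s", ip))

    	// ... or, better, passed as typed attributes.
    	log.With(slog.String("ip", ip), slog.Any("error", err)).Error("Downloading files failed")
    }
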
-func New(log *logger.Logger, dialer NetDialer, serviceManager serviceManager, +func New(log *slog.Logger, dialer NetDialer, serviceManager serviceManager, transfer fileTransferer, info infoSetter, ) *Download { return &Download{ @@ -51,7 +50,7 @@ func (d *Download) DownloadInfo(ctx context.Context, ip string) error { return nil } - log := d.log.With(zap.String("ip", ip)) + log := d.log.With(slog.String("ip", ip)) serverAddr := net.JoinHostPort(ip, strconv.Itoa(constants.DebugdPort)) client, closer, err := d.newClient(ctx, serverAddr, log) @@ -60,19 +59,19 @@ func (d *Download) DownloadInfo(ctx context.Context, ip string) error { } defer closer.Close() - log.Infof("Trying to download info") + log.Info("Trying to download info") resp, err := client.GetInfo(ctx, &pb.GetInfoRequest{}) if err != nil { return fmt.Errorf("getting info from other instance: %w", err) } - log.Infof("Successfully downloaded info") + log.Info("Successfully downloaded info") return d.info.SetProto(resp.Info) } // DownloadDeployment will open a new grpc connection to another instance, attempting to download files from that instance. func (d *Download) DownloadDeployment(ctx context.Context, ip string) error { - log := d.log.With(zap.String("ip", ip)) + log := d.log.With(slog.String("ip", ip)) serverAddr := net.JoinHostPort(ip, strconv.Itoa(constants.DebugdPort)) client, closer, err := d.newClient(ctx, serverAddr, log) @@ -81,7 +80,7 @@ func (d *Download) DownloadDeployment(ctx context.Context, ip string) error { } defer closer.Close() - log.Infof("Trying to download files") + log.Info("Trying to download files") stream, err := client.DownloadFiles(ctx, &pb.DownloadFilesRequest{}) if err != nil { return fmt.Errorf("starting file download from other instance: %w", err) @@ -90,15 +89,15 @@ func (d *Download) DownloadDeployment(ctx context.Context, ip string) error { err = d.transfer.RecvFiles(stream) switch { case err == nil: - d.log.Infof("Downloading files succeeded") + d.log.Info("Downloading files succeeded") case errors.Is(err, filetransfer.ErrReceiveRunning): - d.log.Warnf("Download already in progress") + d.log.Warn("Download already in progress") return err case errors.Is(err, filetransfer.ErrReceiveFinished): - d.log.Warnf("Download already finished") + d.log.Warn("Download already finished") return nil default: - d.log.With(zap.Error(err)).Errorf("Downloading files failed") + d.log.With(slog.Any("error", err)).Error("Downloading files failed") return err } @@ -111,15 +110,15 @@ func (d *Download) DownloadDeployment(ctx context.Context, ip string) error { ctx, file.OverrideServiceUnit, file.TargetPath, ); err != nil { // continue on error to allow other units to be overridden - d.log.With(zap.Error(err)).Errorf("Failed to override service unit %s", file.OverrideServiceUnit) + d.log.With(slog.Any("error", err)).Error("Failed to override service unit %s", file.OverrideServiceUnit) } } return nil } -func (d *Download) newClient(ctx context.Context, serverAddr string, log *logger.Logger) (pb.DebugdClient, io.Closer, error) { - log.Infof("Connecting to server") +func (d *Download) newClient(ctx context.Context, serverAddr string, log *slog.Logger) (pb.DebugdClient, io.Closer, error) { + log.Info("Connecting to server") conn, err := d.dial(ctx, serverAddr) if err != nil { return nil, nil, fmt.Errorf("connecting to other instance via gRPC: %w", err) diff --git a/debugd/internal/debugd/deploy/download_test.go b/debugd/internal/debugd/deploy/download_test.go index 8477377c50..5c8c5c8c05 100644 --- 
a/debugd/internal/debugd/deploy/download_test.go +++ b/debugd/internal/debugd/deploy/download_test.go @@ -9,6 +9,7 @@ package deploy import ( "context" "errors" + "log/slog" "net" "strconv" "testing" @@ -117,7 +118,7 @@ func TestDownloadDeployment(t *testing.T) { defer grpcServ.GracefulStop() download := &Download{ - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), dialer: dialer, transfer: transfer, serviceManager: serviceMgr, @@ -189,7 +190,7 @@ func TestDownloadInfo(t *testing.T) { defer grpcServer.GracefulStop() download := &Download{ - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), dialer: dialer, info: &tc.infoSetter, } diff --git a/debugd/internal/debugd/deploy/service.go b/debugd/internal/debugd/deploy/service.go index f82e09c252..fb78094a07 100644 --- a/debugd/internal/debugd/deploy/service.go +++ b/debugd/internal/debugd/deploy/service.go @@ -9,15 +9,14 @@ package deploy import ( "context" "fmt" + "log/slog" "os" "path/filepath" "regexp" "strings" "sync" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/spf13/afero" - "go.uber.org/zap" ) const ( @@ -60,14 +59,14 @@ type SystemdUnit struct { // ServiceManager receives ServiceManagerRequests and units via channels and performs the requests / creates the unit files. type ServiceManager struct { - log *logger.Logger + log *slog.Logger dbus dbusClient fs afero.Fs systemdUnitFilewriteLock sync.Mutex } // NewServiceManager creates a new ServiceManager. -func NewServiceManager(log *logger.Logger) *ServiceManager { +func NewServiceManager(log *slog.Logger) *ServiceManager { fs := afero.NewOsFs() return &ServiceManager{ log: log, @@ -102,7 +101,7 @@ type dbusConn interface { // SystemdAction will perform a systemd action on a service unit (start, stop, restart, reload). func (s *ServiceManager) SystemdAction(ctx context.Context, request ServiceManagerRequest) error { - log := s.log.With(zap.String("unit", request.Unit), zap.String("action", request.Action.String())) + log := s.log.With(slog.String("unit", request.Unit), slog.String("action", request.Action.String())) conn, err := s.dbus.NewSystemConnectionContext(ctx) if err != nil { return fmt.Errorf("establishing systemd connection: %w", err) @@ -127,7 +126,7 @@ func (s *ServiceManager) SystemdAction(ctx context.Context, request ServiceManag } if request.Action == Reload { - log.Infof("daemon-reload succeeded") + log.Info("daemon-reload succeeded") return nil } // Wait for the action to finish and then check if it was @@ -136,7 +135,7 @@ func (s *ServiceManager) SystemdAction(ctx context.Context, request ServiceManag switch result { case "done": - log.Infof("%s on systemd unit %s succeeded", request.Action, request.Unit) + log.Info("%s on systemd unit %s succeeded", request.Action, request.Unit) return nil default: @@ -146,8 +145,8 @@ func (s *ServiceManager) SystemdAction(ctx context.Context, request ServiceManag // WriteSystemdUnitFile will write a systemd unit to disk. 
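
The Named→WithGroup substitution used throughout is close but not identical in effect: zap's Named prefixes the logger's name, while slog's WithGroup namespaces the attributes added afterwards. A small sketch of the resulting record shape, assuming a JSON handler:

    package main

    import (
    	"log/slog"
    	"os"
    )

    func main() {
    	log := slog.New(slog.NewJSONHandler(os.Stdout, nil))

    	// Attributes added after WithGroup are nested under the group key, e.g.
    	// {"msg":"daemon-reload succeeded","serviceManager":{"unit":"foo.service"}}
    	sm := log.WithGroup("serviceManager").With(slog.String("unit", "foo.service"))
    	sm.Info("daemon-reload succeeded")
    }
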
func (s *ServiceManager) WriteSystemdUnitFile(ctx context.Context, unit SystemdUnit) error { - log := s.log.With(zap.String("unitFile", fmt.Sprintf("%s/%s", systemdUnitFolder, unit.Name))) - log.Infof("Writing systemd unit file") + log := s.log.With(slog.String("unitFile", fmt.Sprintf("%s/%s", systemdUnitFolder, unit.Name))) + log.Info("Writing systemd unit file") s.systemdUnitFilewriteLock.Lock() defer s.systemdUnitFilewriteLock.Unlock() if err := afero.WriteFile(s.fs, fmt.Sprintf("%s/%s", systemdUnitFolder, unit.Name), []byte(unit.Contents), 0o644); err != nil { @@ -158,14 +157,14 @@ func (s *ServiceManager) WriteSystemdUnitFile(ctx context.Context, unit SystemdU return fmt.Errorf("performing systemd daemon-reload: %w", err) } - log.Infof("Wrote systemd unit file and performed daemon-reload") + log.Info("Wrote systemd unit file and performed daemon-reload") return nil } // OverrideServiceUnitExecStart will override the ExecStart of a systemd unit. func (s *ServiceManager) OverrideServiceUnitExecStart(ctx context.Context, unitName, execStart string) error { - log := s.log.With(zap.String("unitFile", fmt.Sprintf("%s/%s", systemdUnitFolder, unitName))) - log.Infof("Overriding systemd unit file execStart") + log := s.log.With(slog.String("unitFile", fmt.Sprintf("%s/%s", systemdUnitFolder, unitName))) + log.Info("Overriding systemd unit file execStart") if !systemdUnitNameRegexp.MatchString(unitName) { return fmt.Errorf("unit name %q is invalid", unitName) } @@ -187,13 +186,13 @@ func (s *ServiceManager) OverrideServiceUnitExecStart(ctx context.Context, unitN // do not return early here // the "daemon-reload" command may return an unrelated error // and there is no way to know if the override was successful - log.Warnf("Failed to perform systemd daemon-reload: %v", err) + log.Warn("Failed to perform systemd daemon-reload: %v", err) } if err := s.SystemdAction(ctx, ServiceManagerRequest{Unit: unitName + ".service", Action: Restart}); err != nil { - log.Warnf("Failed to perform unit restart: %v", err) + log.Warn("Failed to perform unit restart: %v", err) return fmt.Errorf("performing systemd unit restart: %w", err) } - log.Infof("Overrode systemd unit file execStart, performed daemon-reload and restarted unit %v", unitName) + log.Info("Overrode systemd unit file execStart, performed daemon-reload and restarted unit %v", unitName) return nil } diff --git a/debugd/internal/debugd/deploy/service_test.go b/debugd/internal/debugd/deploy/service_test.go index c0c98f93e7..bae933e126 100644 --- a/debugd/internal/debugd/deploy/service_test.go +++ b/debugd/internal/debugd/deploy/service_test.go @@ -10,6 +10,7 @@ import ( "context" "errors" "fmt" + "log/slog" "sync" "testing" @@ -102,7 +103,7 @@ func TestSystemdAction(t *testing.T) { fs := afero.NewMemMapFs() manager := ServiceManager{ - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), dbus: &tc.dbus, fs: fs, systemdUnitFilewriteLock: sync.Mutex{}, @@ -181,7 +182,7 @@ func TestWriteSystemdUnitFile(t *testing.T) { fs = afero.NewReadOnlyFs(fs) } manager := ServiceManager{ - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), dbus: &tc.dbus, fs: fs, systemdUnitFilewriteLock: sync.Mutex{}, @@ -294,7 +295,7 @@ func TestOverrideServiceUnitExecStart(t *testing.T) { fs = afero.NewReadOnlyFs(fs) } manager := ServiceManager{ - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), dbus: &tc.dbus, fs: fs, systemdUnitFilewriteLock: sync.Mutex{}, 
diff --git a/debugd/internal/debugd/logcollector/logcollector.go b/debugd/internal/debugd/logcollector/logcollector.go index c40859d0be..89579e778b 100644 --- a/debugd/internal/debugd/logcollector/logcollector.go +++ b/debugd/internal/debugd/logcollector/logcollector.go @@ -12,6 +12,7 @@ import ( "context" "fmt" "io" + "log/slog" "os" "os/exec" "path/filepath" @@ -22,7 +23,6 @@ import ( "github.com/edgelesssys/constellation/v2/debugd/internal/debugd/info" "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider" "github.com/edgelesssys/constellation/v2/internal/cloud/metadata" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/internal/versions" ) @@ -36,60 +36,60 @@ const ( // // This requires podman to be installed. func NewStartTrigger(ctx context.Context, wg *sync.WaitGroup, provider cloudprovider.Provider, - metadata providerMetadata, logger *logger.Logger, + metadata providerMetadata, logger *slog.Logger, ) func(*info.Map) { return func(infoMap *info.Map) { wg.Add(1) go func() { defer wg.Done() - logger.Infof("Start trigger running") + logger.Info("Start trigger running") if err := ctx.Err(); err != nil { - logger.With("err", err).Errorf("Start trigger canceled") + logger.With("err", err).Error("Start trigger canceled") return } - logger.Infof("Get flags from infos") + logger.Info("Get flags from infos") _, ok, err := infoMap.Get("logcollect") if err != nil { - logger.Errorf("Getting infos: %v", err) + logger.Error("Getting infos: %v", err) return } if !ok { - logger.Infof("Flag 'logcollect' not set") + logger.Info("Flag 'logcollect' not set") return } cerdsGetter, err := newCloudCredentialGetter(ctx, provider, infoMap) if err != nil { - logger.Errorf("Creating cloud credential getter: %v", err) + logger.Error("Creating cloud credential getter: %v", err) return } - logger.Infof("Getting credentials") + logger.Info("Getting credentials") creds, err := cerdsGetter.GetOpensearchCredentials(ctx) if err != nil { - logger.Errorf("Getting opensearch credentials: %v", err) + logger.Error("Getting opensearch credentials: %v", err) return } - logger.Infof("Getting logstash pipeline template from image %s", versions.LogstashImage) + logger.Info("Getting logstash pipeline template from image %s", versions.LogstashImage) tmpl, err := getTemplate(ctx, logger, versions.LogstashImage, "/run/logstash/templates/pipeline.conf", "/run/logstash") if err != nil { - logger.Errorf("Getting logstash pipeline template: %v", err) + logger.Error("Getting logstash pipeline template: %v", err) return } infoMapM, err := infoMap.GetCopy() if err != nil { - logger.Errorf("Getting copy of map from info: %v", err) + logger.Error("Getting copy of map from info: %v", err) return } infoMapM = filterInfoMap(infoMapM) setCloudMetadata(ctx, infoMapM, provider, metadata) - logger.Infof("Writing logstash pipeline") + logger.Info("Writing logstash pipeline") pipelineConf := logstashConfInput{ Port: 5044, Host: openSearchHost, @@ -97,14 +97,14 @@ func NewStartTrigger(ctx context.Context, wg *sync.WaitGroup, provider cloudprov Credentials: creds, } if err := writeTemplate("/run/logstash/pipeline/pipeline.conf", tmpl, pipelineConf); err != nil { - logger.Errorf("Writing logstash config: %v", err) + logger.Error("Writing logstash config: %v", err) return } - logger.Infof("Getting filebeat config template from image %s", versions.FilebeatImage) + logger.Info("Getting filebeat config template from image %s", versions.FilebeatImage) tmpl, err = getTemplate(ctx, logger, 
versions.FilebeatImage, "/run/filebeat/templates/filebeat.yml", "/run/filebeat") if err != nil { - logger.Errorf("Getting filebeat config template: %v", err) + logger.Error("Getting filebeat config template: %v", err) return } filebeatConf := filebeatConfInput{ @@ -112,26 +112,26 @@ func NewStartTrigger(ctx context.Context, wg *sync.WaitGroup, provider cloudprov AddCloudMetadata: true, } if err := writeTemplate("/run/filebeat/filebeat.yml", tmpl, filebeatConf); err != nil { - logger.Errorf("Writing filebeat pipeline: %v", err) + logger.Error("Writing filebeat pipeline: %v", err) return } - logger.Infof("Starting log collection pod") + logger.Info("Starting log collection pod") if err := startPod(ctx, logger); err != nil { - logger.Errorf("Starting log collection: %v", err) + logger.Error("Starting log collection: %v", err) } }() } } -func getTemplate(ctx context.Context, logger *logger.Logger, image, templateDir, destDir string) (*template.Template, error) { +func getTemplate(ctx context.Context, logger *slog.Logger, image, templateDir, destDir string) (*template.Template, error) { createContainerArgs := []string{ "create", "--name=template", image, } createContainerCmd := exec.CommandContext(ctx, "podman", createContainerArgs...) - logger.Infof("Creating template container") + logger.Info("Creating template container") if out, err := createContainerCmd.CombinedOutput(); err != nil { return nil, fmt.Errorf("creating template container: %w\n%s", err, out) } @@ -146,7 +146,7 @@ func getTemplate(ctx context.Context, logger *logger.Logger, image, templateDir, destDir, } copyFromCmd := exec.CommandContext(ctx, "podman", copyFromArgs...) - logger.Infof("Copying templates") + logger.Info("Copying templates") if out, err := copyFromCmd.CombinedOutput(); err != nil { return nil, fmt.Errorf("copying templates: %w\n%s", err, out) } @@ -156,7 +156,7 @@ func getTemplate(ctx context.Context, logger *logger.Logger, image, templateDir, "template", } removeContainerCmd := exec.CommandContext(ctx, "podman", removeContainerArgs...) - logger.Infof("Removing template container") + logger.Info("Removing template container") if out, err := removeContainerCmd.CombinedOutput(); err != nil { return nil, fmt.Errorf("removing template container: %w\n%s", err, out) } @@ -169,7 +169,7 @@ func getTemplate(ctx context.Context, logger *logger.Logger, image, templateDir, return tmpl, nil } -func startPod(ctx context.Context, logger *logger.Logger) error { +func startPod(ctx context.Context, logger *slog.Logger) error { // create a shared pod for filebeat, metricbeat and logstash createPodArgs := []string{ "pod", @@ -177,13 +177,13 @@ func startPod(ctx context.Context, logger *logger.Logger) error { "logcollection", } createPodCmd := exec.CommandContext(ctx, "podman", createPodArgs...) - logger.Infof("Create pod command: %v", createPodCmd.String()) + logger.Info("Create pod command: %v", createPodCmd.String()) if out, err := createPodCmd.CombinedOutput(); err != nil { return fmt.Errorf("failed to create pod: %w; output: %s", err, out) } // start logstash container - logstashLog := newCmdLogger(logger.Named("logstash")) + logstashLog := newCmdLogger(logger.WithGroup("logstash")) runLogstashArgs := []string{ "run", "--rm", @@ -194,7 +194,7 @@ func startPod(ctx context.Context, logger *logger.Logger) error { versions.LogstashImage, } runLogstashCmd := exec.CommandContext(ctx, "podman", runLogstashArgs...) 
- logger.Infof("Run logstash command: %v", runLogstashCmd.String()) + logger.Info("Run logstash command: %v", runLogstashCmd.String()) runLogstashCmd.Stdout = logstashLog runLogstashCmd.Stderr = logstashLog if err := runLogstashCmd.Start(); err != nil { @@ -202,7 +202,7 @@ func startPod(ctx context.Context, logger *logger.Logger) error { } // start filebeat container - filebeatLog := newCmdLogger(logger.Named("filebeat")) + filebeatLog := newCmdLogger(logger.WithGroup("filebeat")) runFilebeatArgs := []string{ "run", "--rm", @@ -219,7 +219,7 @@ func startPod(ctx context.Context, logger *logger.Logger) error { versions.FilebeatImage, } runFilebeatCmd := exec.CommandContext(ctx, "podman", runFilebeatArgs...) - logger.Infof("Run filebeat command: %v", runFilebeatCmd.String()) + logger.Info("Run filebeat command: %v", runFilebeatCmd.String()) runFilebeatCmd.Stdout = filebeatLog runFilebeatCmd.Stderr = filebeatLog if err := runFilebeatCmd.Start(); err != nil { @@ -295,16 +295,16 @@ func setCloudMetadata(ctx context.Context, m map[string]string, provider cloudpr } } -func newCmdLogger(logger *logger.Logger) io.Writer { +func newCmdLogger(logger *slog.Logger) io.Writer { return &cmdLogger{logger: logger} } type cmdLogger struct { - logger *logger.Logger + logger *slog.Logger } func (c *cmdLogger) Write(p []byte) (n int, err error) { - c.logger.Infof("%s", p) + c.logger.Info("%s", p) return len(p), nil } diff --git a/debugd/internal/debugd/metadata/scheduler.go b/debugd/internal/debugd/metadata/scheduler.go index eb04e5ade1..bf6705fecc 100644 --- a/debugd/internal/debugd/metadata/scheduler.go +++ b/debugd/internal/debugd/metadata/scheduler.go @@ -8,12 +8,11 @@ package metadata import ( "context" + "log/slog" "sync" "time" "github.com/edgelesssys/constellation/v2/debugd/internal/debugd" - "github.com/edgelesssys/constellation/v2/internal/logger" - "go.uber.org/zap" ) // Fetcher retrieves other debugd IPs from cloud provider metadata. @@ -24,7 +23,7 @@ type Fetcher interface { // Scheduler schedules fetching of metadata using timers. type Scheduler struct { - log *logger.Logger + log *slog.Logger fetcher Fetcher downloader downloader deploymentDone bool @@ -33,7 +32,7 @@ type Scheduler struct { } // NewScheduler returns a new scheduler. 
-func NewScheduler(log *logger.Logger, fetcher Fetcher, downloader downloader) *Scheduler { +func NewScheduler(log *slog.Logger, fetcher Fetcher, downloader downloader) *Scheduler { return &Scheduler{ log: log, fetcher: fetcher, @@ -60,22 +59,22 @@ func (s *Scheduler) Start(ctx context.Context, wg *sync.WaitGroup) { ips, err := s.fetcher.DiscoverDebugdIPs(ctx) if err != nil { - s.log.With(zap.Error(err)).Warnf("Discovering debugd IPs failed") + s.log.With(slog.Any("error", err)).Warn("Discovering debugd IPs failed") } lbip, err := s.fetcher.DiscoverLoadBalancerIP(ctx) if err != nil { - s.log.With(zap.Error(err)).Warnf("Discovering load balancer IP failed") + s.log.With(slog.Any("error", err)).Warn("Discovering load balancer IP failed") } else { ips = append(ips, lbip) } if len(ips) == 0 { - s.log.With(zap.Error(err)).Warnf("No debugd IPs discovered") + s.log.With(slog.Any("error", err)).Warn("No debugd IPs discovered") continue } - s.log.With(zap.Strings("ips", ips)).Infof("Discovered instances") + s.log.With(slog.Any("ips", ips)).Info("Discovered instances") s.download(ctx, ips) if s.deploymentDone && s.infoDone { return @@ -90,8 +89,8 @@ func (s *Scheduler) download(ctx context.Context, ips []string) { for _, ip := range ips { if !s.deploymentDone { if err := s.downloader.DownloadDeployment(ctx, ip); err != nil { - s.log.With(zap.Error(err), zap.String("peer", ip)). - Warnf("Downloading deployment from %s: %s", ip, err) + s.log.With(slog.Any("error", err), slog.String("peer", ip)). + Warn("Downloading deployment from %s: %s", ip, err) } else { s.deploymentDone = true } @@ -99,8 +98,8 @@ func (s *Scheduler) download(ctx context.Context, ips []string) { if !s.infoDone { if err := s.downloader.DownloadInfo(ctx, ip); err != nil { - s.log.With(zap.Error(err), zap.String("peer", ip)). - Warnf("Downloading info from %s: %s", ip, err) + s.log.With(slog.Any("error", err), slog.String("peer", ip)). + Warn("Downloading info from %s: %s", ip, err) } else { s.infoDone = true } diff --git a/debugd/internal/debugd/metadata/scheduler_test.go b/debugd/internal/debugd/metadata/scheduler_test.go index 13f9d47076..5289eb2055 100644 --- a/debugd/internal/debugd/metadata/scheduler_test.go +++ b/debugd/internal/debugd/metadata/scheduler_test.go @@ -84,7 +84,7 @@ func TestSchedulerStart(t *testing.T) { assert := assert.New(t) scheduler := Scheduler{ - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), fetcher: &tc.fetcher, downloader: &tc.downloader, interval: 20 * time.Millisecond, diff --git a/debugd/internal/debugd/server/server.go b/debugd/internal/debugd/server/server.go index 551230ae71..3bf869a5ad 100644 --- a/debugd/internal/debugd/server/server.go +++ b/debugd/internal/debugd/server/server.go @@ -10,7 +10,9 @@ package server import ( "context" "errors" + "log/slog" "net" + "os" "strconv" "sync" "time" @@ -27,7 +29,7 @@ import ( ) type debugdServer struct { - log *logger.Logger + log *slog.Logger serviceManager serviceManager transfer fileTransferer info *info.Map @@ -36,7 +38,7 @@ type debugdServer struct { } // New creates a new debugdServer according to the gRPC spec. 
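
The download loop above already carries the peer as a structured field; keeping the message constant and the variable parts in attributes is what makes the records easy to aggregate. A compact, self-contained sketch with placeholder peers:

    package main

    import (
    	"errors"
    	"log/slog"
    	"os"
    )

    func main() {
    	log := slog.New(slog.NewJSONHandler(os.Stdout, nil))

    	// One constant message; the peer and error vary only as attributes.
    	for _, ip := range []string{"192.0.2.1", "192.0.2.2"} {
    		err := errors.New("connection refused") // stand-in for a failed download
    		log.With(slog.Any("error", err), slog.String("peer", ip)).Warn("Downloading deployment failed")
    	}
    }
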
-func New(log *logger.Logger, serviceManager serviceManager, transfer fileTransferer, infos *info.Map) pb.DebugdServer { +func New(log *slog.Logger, serviceManager serviceManager, transfer fileTransferer, infos *info.Map) pb.DebugdServer { return &debugdServer{ log: log, serviceManager: serviceManager, @@ -47,25 +49,25 @@ func New(log *logger.Logger, serviceManager serviceManager, transfer fileTransfe // SetInfo sets the info of the debugd instance. func (s *debugdServer) SetInfo(_ context.Context, req *pb.SetInfoRequest) (*pb.SetInfoResponse, error) { - s.log.Infof("Received SetInfo request") + s.log.Info("Received SetInfo request") if len(req.Info) == 0 { - s.log.Infof("Info is empty") + s.log.Info("Info is empty") } setProtoErr := s.info.SetProto(req.Info) if errors.Is(setProtoErr, info.ErrInfoAlreadySet) { - s.log.Warnf("Setting info failed (already set)") + s.log.Warn("Setting info failed (already set)") return &pb.SetInfoResponse{ Status: pb.SetInfoStatus_SET_INFO_ALREADY_SET, }, nil } if setProtoErr != nil { - s.log.With(zap.Error(setProtoErr)).Errorf("Setting info failed") + s.log.With(slog.Any("error", setProtoErr)).Error("Setting info failed") return nil, setProtoErr } - s.log.Infof("Info set") + s.log.Info("Info set") return &pb.SetInfoResponse{ Status: pb.SetInfoStatus_SET_INFO_SUCCESS, @@ -74,7 +76,7 @@ func (s *debugdServer) SetInfo(_ context.Context, req *pb.SetInfoRequest) (*pb.S // GetInfo returns the info of the debugd instance. func (s *debugdServer) GetInfo(_ context.Context, _ *pb.GetInfoRequest) (*pb.GetInfoResponse, error) { - s.log.Infof("Received GetInfo request") + s.log.Info("Received GetInfo request") info, err := s.info.GetProto() if err != nil { @@ -86,23 +88,23 @@ func (s *debugdServer) GetInfo(_ context.Context, _ *pb.GetInfoRequest) (*pb.Get // UploadFiles receives a stream of files (each consisting of a header and a stream of chunks) and writes them to the filesystem. 
func (s *debugdServer) UploadFiles(stream pb.Debugd_UploadFilesServer) error { - s.log.Infof("Received UploadFiles request") + s.log.Info("Received UploadFiles request") err := s.transfer.RecvFiles(stream) switch { case err == nil: - s.log.Infof("Uploading files succeeded") + s.log.Info("Uploading files succeeded") case errors.Is(err, filetransfer.ErrReceiveRunning): - s.log.Warnf("Upload already in progress") + s.log.Warn("Upload already in progress") return stream.SendAndClose(&pb.UploadFilesResponse{ Status: pb.UploadFilesStatus_UPLOAD_FILES_ALREADY_STARTED, }) case errors.Is(err, filetransfer.ErrReceiveFinished): - s.log.Warnf("Upload already finished") + s.log.Warn("Upload already finished") return stream.SendAndClose(&pb.UploadFilesResponse{ Status: pb.UploadFilesStatus_UPLOAD_FILES_ALREADY_FINISHED, }) default: - s.log.With(zap.Error(err)).Errorf("Uploading files failed") + s.log.With(slog.Any("error", err)).Error("Uploading files failed") return stream.SendAndClose(&pb.UploadFilesResponse{ Status: pb.UploadFilesStatus_UPLOAD_FILES_UPLOAD_FAILED, }) @@ -120,7 +122,7 @@ func (s *debugdServer) UploadFiles(stream pb.Debugd_UploadFilesServer) error { } if overrideUnitErr != nil { - s.log.With(zap.Error(overrideUnitErr)).Errorf("Overriding service units failed") + s.log.With(slog.Any("error", overrideUnitErr)).Error("Overriding service units failed") return stream.SendAndClose(&pb.UploadFilesResponse{ Status: pb.UploadFilesStatus_UPLOAD_FILES_START_FAILED, }) @@ -132,13 +134,13 @@ func (s *debugdServer) UploadFiles(stream pb.Debugd_UploadFilesServer) error { // DownloadFiles streams the previously received files to other instances. func (s *debugdServer) DownloadFiles(_ *pb.DownloadFilesRequest, stream pb.Debugd_DownloadFilesServer) error { - s.log.Infof("Sending files to other instance") + s.log.Info("Sending files to other instance") return s.transfer.SendFiles(stream) } // UploadSystemServiceUnits receives systemd service units, writes them to a service file and schedules a daemon-reload. func (s *debugdServer) UploadSystemServiceUnits(ctx context.Context, in *pb.UploadSystemdServiceUnitsRequest) (*pb.UploadSystemdServiceUnitsResponse, error) { - s.log.Infof("Uploading systemd service units") + s.log.Info("Uploading systemd service units") for _, unit := range in.Units { if err := s.serviceManager.WriteSystemdUnitFile(ctx, deploy.SystemdUnit{Name: unit.Name, Contents: unit.Contents}); err != nil { return &pb.UploadSystemdServiceUnitsResponse{Status: pb.UploadSystemdServiceUnitsStatus_UPLOAD_SYSTEMD_SERVICE_UNITS_FAILURE}, nil @@ -149,25 +151,27 @@ func (s *debugdServer) UploadSystemServiceUnits(ctx context.Context, in *pb.Uplo } // Start will start the gRPC server as goroutine. 
-func Start(log *logger.Logger, wg *sync.WaitGroup, serv pb.DebugdServer) { +func Start(log *slog.Logger, wg *sync.WaitGroup, serv pb.DebugdServer) { wg.Add(1) go func() { defer wg.Done() - grpcLog := log.Named("gRPC") + grpcLog := log.WithGroup("gRPC") + // TODO(miampf): Find a way to dynamically increase the log level grpcLog.WithIncreasedLevel(zap.WarnLevel).ReplaceGRPCLogger() grpcServer := grpc.NewServer( - grpcLog.GetServerStreamInterceptor(), - grpcLog.GetServerUnaryInterceptor(), + logger.GetServerStreamInterceptor(grpcLog), + logger.GetServerUnaryInterceptor(grpcLog), grpc.KeepaliveParams(keepalive.ServerParameters{Time: 15 * time.Second}), ) pb.RegisterDebugdServer(grpcServer, serv) lis, err := net.Listen("tcp", net.JoinHostPort("0.0.0.0", strconv.Itoa(constants.DebugdPort))) if err != nil { - log.With(zap.Error(err)).Fatalf("Listening failed") + log.With(slog.Any("error", err)).Error("Listening failed") + os.Exit(1) } - log.Infof("gRPC server is waiting for connections") + log.Info("gRPC server is waiting for connections") grpcServer.Serve(lis) }() } diff --git a/debugd/internal/debugd/server/server_test.go b/debugd/internal/debugd/server/server_test.go index 3ae8b3312b..e16f40a2d8 100644 --- a/debugd/internal/debugd/server/server_test.go +++ b/debugd/internal/debugd/server/server_test.go @@ -65,7 +65,7 @@ func TestSetInfo(t *testing.T) { require := require.New(t) serv := debugdServer{ - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), info: tc.info, } @@ -128,7 +128,7 @@ func TestGetInfo(t *testing.T) { } serv := debugdServer{ - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), info: tc.info, } @@ -192,7 +192,7 @@ func TestUploadFiles(t *testing.T) { transfer := &stubTransfer{files: tc.files, recvFilesErr: tc.recvFilesErr} serv := debugdServer{ - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), serviceManager: serviceMgr, transfer: transfer, } @@ -237,7 +237,7 @@ func TestDownloadFiles(t *testing.T) { transfer := &stubTransfer{canSend: tc.canSend} serv := debugdServer{ - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), transfer: transfer, } @@ -317,7 +317,7 @@ func TestUploadSystemServiceUnits(t *testing.T) { require := require.New(t) serv := debugdServer{ - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), serviceManager: &tc.serviceManager, } grpcServ, conn, err := setupServerWithConn(endpoint, &serv) diff --git a/debugd/internal/filetransfer/filetransfer.go b/debugd/internal/filetransfer/filetransfer.go index ff90bdf09b..c4abe7ac83 100644 --- a/debugd/internal/filetransfer/filetransfer.go +++ b/debugd/internal/filetransfer/filetransfer.go @@ -12,13 +12,13 @@ import ( "errors" "io" "io/fs" + "log/slog" "sync" "sync/atomic" "github.com/edgelesssys/constellation/v2/debugd/internal/debugd" "github.com/edgelesssys/constellation/v2/debugd/internal/filetransfer/streamer" pb "github.com/edgelesssys/constellation/v2/debugd/service" - "github.com/edgelesssys/constellation/v2/internal/logger" "go.uber.org/zap" ) @@ -35,7 +35,7 @@ type SendFilesStream interface { // FileTransferer manages sending and receiving of files. type FileTransferer struct { fileMux sync.RWMutex - log *logger.Logger + log *slog.Logger receiveStarted bool receiveFinished atomic.Bool files []FileStat @@ -44,7 +44,7 @@ type FileTransferer struct { } // New creates a new FileTransferer. 
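
For reference, the client side of the interceptor API introduced earlier in this series mirrors the server wiring above. A sketch, assuming the signatures from this patch set; the target address is a placeholder:

    package main

    import (
    	"log/slog"
    	"os"

    	"github.com/edgelesssys/constellation/v2/internal/logger"
    	"google.golang.org/grpc"
    	"google.golang.org/grpc/credentials/insecure"
    )

    func main() {
    	log := slog.New(slog.NewJSONHandler(os.Stdout, nil))

    	// GetClientUnaryInterceptor now takes the *slog.Logger explicitly.
    	conn, err := grpc.Dial("192.0.2.1:4000",
    		grpc.WithTransportCredentials(insecure.NewCredentials()),
    		logger.GetClientUnaryInterceptor(log.WithGroup("gRPC")),
    	)
    	if err != nil {
    		log.With(slog.Any("error", err)).Error("Dialing failed")
    		os.Exit(1)
    	}
    	defer conn.Close()
    }
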
-func New(log *logger.Logger, streamer streamReadWriter, showProgress bool) *FileTransferer { +func New(log *slog.Logger, streamer streamReadWriter, showProgress bool) *FileTransferer { return &FileTransferer{ log: log, streamer: streamer, @@ -146,7 +146,7 @@ func (s *FileTransferer) handleFileRecv(stream RecvFilesStream) (bool, error) { if header == nil { return false, errors.New("first message must be a header message") } - s.log.Infof("Starting file receive of %q", header.TargetPath) + s.log.Info("Starting file receive of %q", header.TargetPath) s.addFile(FileStat{ SourcePath: header.TargetPath, TargetPath: header.TargetPath, @@ -160,10 +160,10 @@ func (s *FileTransferer) handleFileRecv(stream RecvFilesStream) (bool, error) { }) recvChunkStream := &recvChunkStream{stream: stream} if err := s.streamer.WriteStream(header.TargetPath, recvChunkStream, s.showProgress); err != nil { - s.log.With(zap.Error(err)).Errorf("Receive of file %q failed", header.TargetPath) + s.log.With(slog.Any("error", err)).Error("Receive of file %q failed", header.TargetPath) return false, err } - s.log.Infof("Finished file receive of %q", header.TargetPath) + s.log.Info("Finished file receive of %q", header.TargetPath) return false, nil } diff --git a/debugd/internal/filetransfer/filetransfer_test.go b/debugd/internal/filetransfer/filetransfer_test.go index fed9d9d055..8fa101229a 100644 --- a/debugd/internal/filetransfer/filetransfer_test.go +++ b/debugd/internal/filetransfer/filetransfer_test.go @@ -117,7 +117,7 @@ func TestSendFiles(t *testing.T) { streamer := &stubStreamReadWriter{readStreamErr: tc.readStreamErr} stream := &stubSendFilesStream{sendErr: tc.sendErr} transfer := &FileTransferer{ - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), streamer: streamer, showProgress: false, } @@ -254,7 +254,7 @@ func TestRecvFiles(t *testing.T) { streamer := &stubStreamReadWriter{writeStreamErr: tc.writeStreamErr} stream := &fakeRecvFilesStream{msgs: tc.msgs, recvErr: tc.recvErr} - transfer := New(logger.NewTest(t), streamer, false) + transfer := New(slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), streamer, false) if tc.recvAlreadyStarted { transfer.receiveStarted = true } @@ -307,7 +307,7 @@ func TestGetSetFiles(t *testing.T) { assert := assert.New(t) streamer := &dummyStreamReadWriter{} - transfer := New(logger.NewTest(t), streamer, false) + transfer := New(slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), streamer, false) if tc.setFiles != nil { transfer.SetFiles(*tc.setFiles) } @@ -319,7 +319,7 @@ func TestGetSetFiles(t *testing.T) { } func TestConcurrency(t *testing.T) { - ft := New(logger.NewTest(t), &stubStreamReadWriter{}, false) + ft := New(slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), &stubStreamReadWriter{}, false) sendFiles := func() { _ = ft.SendFiles(&stubSendFilesStream{}) diff --git a/disk-mapper/cmd/main.go b/disk-mapper/cmd/main.go index 534ae92670..a7de6b5158 100644 --- a/disk-mapper/cmd/main.go +++ b/disk-mapper/cmd/main.go @@ -10,6 +10,7 @@ import ( "context" "flag" "io" + "log/slog" "net" "os" "path/filepath" @@ -50,18 +51,20 @@ func main() { verbosity := flag.Int("v", 0, logger.CmdLineVerbosityDescription) flag.Parse() - log := logger.New(logger.JSONLog, logger.VerbosityFromInt(*verbosity)) - log.With(zap.String("version", constants.BinaryVersion().String()), zap.String("cloudProvider", *csp)). 
- Infof("Starting disk-mapper") + log := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: logger.VerbosityFromInt(*verbosity)})) + log.With(slog.String("version", constants.BinaryVersion().String()), slog.String("cloudProvider", *csp)). + Info("Starting disk-mapper") // set up quote issuer for aTLS connections attestVariant, err := variant.FromString(os.Getenv(constants.AttestationVariant)) if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to parse attestation variant") + log.With(slog.Any("error", err)).Error("Failed to parse attestation variant") + os.Exit(1) } issuer, err := choose.Issuer(attestVariant, log) if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to select issuer") + log.With(slog.Any("error", err)).Error("Failed to select issuer") + os.Exit(1) } // set up metadata API @@ -73,31 +76,37 @@ func main() { // using udev rules, a symlink for our disk is created at /dev/sdb diskPath, err = filepath.EvalSymlinks(awsStateDiskPath) if err != nil { - log.With(zap.Error(err)).Fatalf("Unable to resolve Azure state disk path") + log.With(slog.Any("error", err)).Error("Unable to resolve Azure state disk path") + os.Exit(1) } metadataClient, err = awscloud.New(context.Background()) if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to set up AWS metadata client") + log.With(slog.Any("error", err)).Error("Failed to set up AWS metadata client") + os.Exit(1) } case cloudprovider.Azure: diskPath, err = filepath.EvalSymlinks(azureStateDiskPath) if err != nil { - log.With(zap.Error(err)).Fatalf("Unable to resolve Azure state disk path") + log.With(slog.Any("error", err)).Error("Unable to resolve Azure state disk path") + os.Exit(1) } metadataClient, err = azurecloud.New(context.Background()) if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to set up Azure metadata client") + log.With(slog.Any("error", err)).Error("Failed to set up Azure metadata client") + os.Exit(1) } case cloudprovider.GCP: diskPath, err = filepath.EvalSymlinks(gcpStateDiskPath) if err != nil { - log.With(zap.Error(err)).Fatalf("Unable to resolve GCP state disk path") + log.With(slog.Any("error", err)).Error("Unable to resolve GCP state disk path") + os.Exit(1) } gcpMeta, err := gcpcloud.New(context.Background()) if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to create GCP metadata client") + log.With(slog.Any("error", err)).Error(("Failed to create GCP metadata client")) + os.Exit(1) } defer gcpMeta.Close() metadataClient = gcpMeta @@ -106,7 +115,8 @@ func main() { diskPath = openstackStateDiskPath metadataClient, err = openstack.New(context.Background()) if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to create OpenStack metadata client") + log.With(slog.Any("error", err)).Error(("Failed to create OpenStack metadata client")) + os.Exit(1) } case cloudprovider.QEMU: @@ -114,13 +124,15 @@ func main() { metadataClient = qemucloud.New() default: - log.Fatalf("CSP %s is not supported by Constellation", *csp) + log.Error("CSP %s is not supported by Constellation", *csp) + os.Exit(1) } // initialize device mapper mapper, free, err := diskencryption.New(diskPath, log) if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to initialize device mapper") + log.With(slog.Any("error", err)).Error(("Failed to initialize device mapper")) + os.Exit(1) } defer free() @@ -132,7 +144,7 @@ func main() { } } setupManger := setup.New( - log.Named("setupManager"), + log.WithGroup("setupManager"), *csp, diskPath, afero.Afero{Fs: afero.NewOsFs()}, @@ -142,7 +154,8 @@ func 
main() { ) if err := setupManger.LogDevices(); err != nil { - log.With(zap.Error(err)).Fatalf("Failed to log devices") + log.With(slog.Any("error", err)).Error(("Failed to log devices")) + os.Exit(1) } // prepare the state disk @@ -151,21 +164,22 @@ func main() { var self metadata.InstanceMetadata self, err = metadataClient.Self(context.Background()) if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to get self metadata") + log.With(slog.Any("error", err)).Error(("Failed to get self metadata")) + os.Exit(1) } rejoinClient := rejoinclient.New( dialer.New(issuer, nil, &net.Dialer{}), self, metadataClient, - log.Named("rejoinClient"), + log.WithGroup("rejoinClient"), ) // set up recovery server if control-plane node var recoveryServer setup.RecoveryServer if self.Role == role.ControlPlane { - recoveryServer = recoveryserver.New(issuer, kmssetup.KMS, log.Named("recoveryServer")) + recoveryServer = recoveryserver.New(issuer, kmssetup.KMS, log.WithGroup("recoveryServer")) } else { - recoveryServer = recoveryserver.NewStub(log.Named("recoveryServer")) + recoveryServer = recoveryserver.NewStub(log.WithGroup("recoveryServer")) } err = setupManger.PrepareExistingDisk(setup.NewNodeRecoverer(recoveryServer, rejoinClient)) @@ -173,6 +187,7 @@ func main() { err = setupManger.PrepareNewDisk() } if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to prepare state disk") + log.With(slog.Any("error", err)).Error(("Failed to prepare state disk")) + os.Exit(1) } } diff --git a/disk-mapper/internal/diskencryption/diskencryption.go b/disk-mapper/internal/diskencryption/diskencryption.go index f6d25a6942..6bed71bfe9 100644 --- a/disk-mapper/internal/diskencryption/diskencryption.go +++ b/disk-mapper/internal/diskencryption/diskencryption.go @@ -15,22 +15,21 @@ package diskencryption import ( "fmt" + "log/slog" "time" "github.com/edgelesssys/constellation/v2/internal/cryptsetup" - "github.com/edgelesssys/constellation/v2/internal/logger" - "go.uber.org/zap" ) // DiskEncryption handles actions for formatting and mapping crypt devices. type DiskEncryption struct { device cryptDevice devicePath string - log *logger.Logger + log *slog.Logger } // New creates a new crypt device for the device at path. 
-func New(path string, log *logger.Logger) (*DiskEncryption, func(), error) { +func New(path string, log *slog.Logger) (*DiskEncryption, func(), error) { device := cryptsetup.New() _, err := device.Init(path) if err != nil { @@ -101,7 +100,7 @@ func (d *DiskEncryption) UnmapDisk(target string) error { func (d *DiskEncryption) Wipe(blockWipeSize int) error { logProgress := func(size, offset uint64) { prog := (float64(offset) / float64(size)) * 100 - d.log.With(zap.String("progress", fmt.Sprintf("%.2f%%", prog))).Infof("Wiping disk") + d.log.With(slog.String("progress", fmt.Sprintf("%.2f%%", prog))).Info("Wiping disk") } start := time.Now() @@ -109,7 +108,7 @@ func (d *DiskEncryption) Wipe(blockWipeSize int) error { if err := d.device.Wipe("integrity", blockWipeSize, 0, logProgress, 30*time.Second); err != nil { return fmt.Errorf("wiping disk: %w", err) } - d.log.With(zap.Duration("duration", time.Since(start))).Infof("Wiping disk successful") + d.log.With(slog.Duration("duration", time.Since(start))).Info("Wiping disk successful") return nil } diff --git a/disk-mapper/internal/recoveryserver/recoveryserver.go b/disk-mapper/internal/recoveryserver/recoveryserver.go index f2267ebbb9..5234d2e63f 100644 --- a/disk-mapper/internal/recoveryserver/recoveryserver.go +++ b/disk-mapper/internal/recoveryserver/recoveryserver.go @@ -17,6 +17,7 @@ package recoveryserver import ( "context" + "log/slog" "net" "sync" @@ -44,13 +45,13 @@ type RecoveryServer struct { grpcServer server factory kmsFactory - log *logger.Logger + log *slog.Logger recoverproto.UnimplementedAPIServer } // New returns a new RecoveryServer. -func New(issuer atls.Issuer, factory kmsFactory, log *logger.Logger) *RecoveryServer { +func New(issuer atls.Issuer, factory kmsFactory, log *slog.Logger) *RecoveryServer { server := &RecoveryServer{ log: log, factory: factory, @@ -58,7 +59,7 @@ func New(issuer atls.Issuer, factory kmsFactory, log *logger.Logger) *RecoverySe grpcServer := grpc.NewServer( grpc.Creds(atlscredentials.New(issuer, nil)), - log.Named("gRPC").GetServerStreamInterceptor(), + logger.GetServerStreamInterceptor(log.WithGroup("gRPC")), ) recoverproto.RegisterAPIServer(grpcServer, server) @@ -71,7 +72,7 @@ func New(issuer atls.Issuer, factory kmsFactory, log *logger.Logger) *RecoverySe // The server will shut down when the call is successful and the keys are returned. // Additionally, the server can be shutdown by canceling the context. 
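
The Wipe progress callback above shows how periodic progress fits the attribute style: the percentage is pre-rendered once with fmt.Sprintf, the duration travels as a typed attribute. A self-contained sketch with dummy sizes:

    package main

    import (
    	"fmt"
    	"log/slog"
    	"os"
    	"time"
    )

    func main() {
    	log := slog.New(slog.NewJSONHandler(os.Stdout, nil))

    	logProgress := func(size, offset uint64) {
    		prog := (float64(offset) / float64(size)) * 100
    		log.With(slog.String("progress", fmt.Sprintf("%.2f%%", prog))).Info("Wiping disk")
    	}

    	start := time.Now()
    	logProgress(100, 42)
    	log.With(slog.Duration("duration", time.Since(start))).Info("Wiping disk successful")
    }
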
func (s *RecoveryServer) Serve(ctx context.Context, listener net.Listener, diskUUID string) (diskKey, measurementSecret []byte, err error) { - s.log.Infof("Starting RecoveryServer") + s.log.Info("Starting RecoveryServer") s.diskUUID = diskUUID recoveryDone := make(chan struct{}, 1) var serveErr error @@ -88,7 +89,7 @@ func (s *RecoveryServer) Serve(ctx context.Context, listener net.Listener, diskU for { select { case <-ctx.Done(): - s.log.Infof("Context canceled, shutting down server") + s.log.Info("Context canceled, shutting down server") s.grpcServer.GracefulStop() return nil, nil, ctx.Err() case <-recoveryDone: @@ -106,7 +107,7 @@ func (s *RecoveryServer) Recover(ctx context.Context, req *recoverproto.RecoverM defer s.mux.Unlock() log := s.log.With(slog.String("peer", grpclog.PeerAddrFromContext(ctx))) - log.Infof("Received recover call") + log.Info("Received recover call") cloudKms, err := s.factory(ctx, req.StorageUri, req.KmsUri) if err != nil { @@ -123,7 +124,7 @@ func (s *RecoveryServer) Recover(ctx context.Context, req *recoverproto.RecoverM } s.stateDiskKey = stateDiskKey s.measurementSecret = measurementSecret - log.Infof("Received state disk key and measurement secret, shutting down server") + log.Info("Received state disk key and measurement secret, shutting down server") go s.grpcServer.GracefulStop() return &recoverproto.RecoverResponse{}, nil @@ -131,18 +132,18 @@ func (s *RecoveryServer) Recover(ctx context.Context, req *recoverproto.RecoverM // StubServer implements the RecoveryServer interface but does not actually start a server. type StubServer struct { - log *logger.Logger + log *slog.Logger } // NewStub returns a new stubbed RecoveryServer. // We use this to avoid having to start a server for worker nodes, since they don't require manual recovery. -func NewStub(log *logger.Logger) *StubServer { +func NewStub(log *slog.Logger) *StubServer { return &StubServer{log: log} } // Serve waits until the context is canceled and returns nil. 
func (s *StubServer) Serve(ctx context.Context, _ net.Listener, _ string) ([]byte, []byte, error) { - s.log.Infof("Running as worker node, skipping recovery server") + s.log.Info("Running as worker node, skipping recovery server") <-ctx.Done() return nil, nil, ctx.Err() } diff --git a/disk-mapper/internal/recoveryserver/recoveryserver_test.go b/disk-mapper/internal/recoveryserver/recoveryserver_test.go index 9da0a1ce2e..049048381f 100644 --- a/disk-mapper/internal/recoveryserver/recoveryserver_test.go +++ b/disk-mapper/internal/recoveryserver/recoveryserver_test.go @@ -9,6 +9,7 @@ package recoveryserver import ( "context" "errors" + "log/slog" "sync" "testing" "time" @@ -35,7 +36,7 @@ func TestMain(m *testing.M) { func TestServe(t *testing.T) { assert := assert.New(t) - log := logger.NewTest(t) + log := slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)) uuid := "uuid" server := New(atls.NewFakeIssuer(variant.Dummy{}), newStubKMS(nil, nil), log) dialer := testdialer.NewBufconnDialer() @@ -106,7 +107,7 @@ func TestRecover(t *testing.T) { ctx := context.Background() serverUUID := "uuid" - server := New(atls.NewFakeIssuer(variant.Dummy{}), tc.factory, logger.NewTest(t)) + server := New(atls.NewFakeIssuer(variant.Dummy{}), tc.factory, slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil))) netDialer := testdialer.NewBufconnDialer() listener := netDialer.GetListener("192.0.2.1:1234") diff --git a/disk-mapper/internal/rejoinclient/rejoinclient.go b/disk-mapper/internal/rejoinclient/rejoinclient.go index 4f50b0b418..bedb01535d 100644 --- a/disk-mapper/internal/rejoinclient/rejoinclient.go +++ b/disk-mapper/internal/rejoinclient/rejoinclient.go @@ -15,16 +15,15 @@ import ( "context" "errors" "fmt" + "log/slog" "net" "strconv" "time" "github.com/edgelesssys/constellation/v2/internal/cloud/metadata" "github.com/edgelesssys/constellation/v2/internal/constants" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/internal/role" "github.com/edgelesssys/constellation/v2/joinservice/joinproto" - "go.uber.org/zap" "google.golang.org/grpc" "k8s.io/utils/clock" ) @@ -47,12 +46,12 @@ type RejoinClient struct { dialer grpcDialer metadataAPI metadataAPI - log *logger.Logger + log *slog.Logger } // New returns a new RejoinClient. func New(dial grpcDialer, nodeInfo metadata.InstanceMetadata, - meta metadataAPI, log *logger.Logger, + meta metadataAPI, log *slog.Logger, ) *RejoinClient { return &RejoinClient{ nodeInfo: nodeInfo, @@ -70,22 +69,22 @@ func New(dial grpcDialer, nodeInfo metadata.InstanceMetadata, // from the metadata API and send rejoin requests to them. // The function returns after a successful rejoin request has been performed. 
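
Every test in this series builds its logger as slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)); if that incantation keeps spreading, a small helper in the logger package could absorb it. A sketch only; the name and placement are hypothetical:

    package logger

    import (
    	"log/slog"
    	"testing"
    )

    // NewTextTest is a hypothetical convenience constructor mirroring what the
    // tests in this series currently build inline from TestWriter.
    func NewTextTest(t *testing.T) *slog.Logger {
    	return slog.New(slog.NewTextHandler(TestWriter{T: t}, nil))
    }

A test would then read log: logger.NewTextTest(t).
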
func (c *RejoinClient) Start(ctx context.Context, diskUUID string) (diskKey, measurementSecret []byte) { - c.log.Infof("Starting RejoinClient") + c.log.Info("Starting RejoinClient") c.diskUUID = diskUUID ticker := c.clock.NewTicker(c.interval) defer ticker.Stop() - defer c.log.Infof("RejoinClient stopped") + defer c.log.Info("RejoinClient stopped") for { endpoints, err := c.getJoinEndpoints() if err != nil { - c.log.With(zap.Error(err)).Errorf("Failed to get control-plane endpoints") + c.log.With(slog.Any("error", err)).Error("Failed to get control-plane endpoints") } else { - c.log.With(zap.Strings("endpoints", endpoints)).Infof("Received list with JoinService endpoints") + c.log.With(slog.Any("endpoints", endpoints)).Info("Received list with JoinService endpoints") diskKey, measurementSecret, err = c.tryRejoinWithAvailableServices(ctx, endpoints) if err == nil { - c.log.Infof("Successfully retrieved rejoin ticket") + c.log.Info("Successfully retrieved rejoin ticket") return diskKey, measurementSecret } } @@ -101,12 +100,12 @@ func (c *RejoinClient) Start(ctx context.Context, diskUUID string) (diskKey, mea // tryRejoinWithAvailableServices tries sending rejoin requests to the available endpoints. func (c *RejoinClient) tryRejoinWithAvailableServices(ctx context.Context, endpoints []string) (diskKey, measurementSecret []byte, err error) { for _, endpoint := range endpoints { - c.log.With(zap.String("endpoint", endpoint)).Infof("Requesting rejoin ticket") + c.log.With(slog.String("endpoint", endpoint)).Info("Requesting rejoin ticket") rejoinTicket, err := c.requestRejoinTicket(endpoint) if err == nil { return rejoinTicket.StateDiskKey, rejoinTicket.MeasurementSecret, nil } - c.log.With(zap.Error(err), zap.String("endpoint", endpoint)).Warnf("Failed to rejoin on endpoint") + c.log.With(slog.Any("error", err), slog.String("endpoint", endpoint)).Warn("Failed to rejoin on endpoint") // stop requesting additional endpoints if the context is done select { @@ -115,7 +114,7 @@ func (c *RejoinClient) tryRejoinWithAvailableServices(ctx context.Context, endpo default: } } - c.log.Errorf("Failed to rejoin on all endpoints") + c.log.Error("Failed to rejoin on all endpoints") return nil, nil, errors.New("failed to join on all endpoints") } diff --git a/disk-mapper/internal/rejoinclient/rejoinclient_test.go b/disk-mapper/internal/rejoinclient/rejoinclient_test.go index 18bf15df14..ef34748a22 100644 --- a/disk-mapper/internal/rejoinclient/rejoinclient_test.go +++ b/disk-mapper/internal/rejoinclient/rejoinclient_test.go @@ -9,6 +9,7 @@ package rejoinclient import ( "context" "errors" + "log/slog" "net" "strconv" "sync" @@ -56,7 +57,7 @@ func TestStartCancel(t *testing.T) { dialer: dialer, nodeInfo: metadata.InstanceMetadata{Role: role.Worker}, metadataAPI: metaAPI, - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), timeout: time.Second * 30, interval: time.Second, clock: clock, @@ -216,7 +217,7 @@ func TestGetJoinEndpoints(t *testing.T) { t.Run(name, func(t *testing.T) { assert := assert.New(t) - client := New(nil, tc.nodeInfo, tc.meta, logger.NewTest(t)) + client := New(nil, tc.nodeInfo, tc.meta, slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil))) endpoints, err := client.getJoinEndpoints() if tc.wantErr { @@ -292,7 +293,7 @@ func TestStart(t *testing.T) { }, } - client := New(dialer, tc.nodeInfo, meta, logger.NewTest(t)) + client := New(dialer, tc.nodeInfo, meta, slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil))) passphrase, secret := 
client.Start(context.Background(), "uuid") assert.Equal(diskKey, passphrase) diff --git a/disk-mapper/internal/setup/setup.go b/disk-mapper/internal/setup/setup.go index 3128807138..b566521615 100644 --- a/disk-mapper/internal/setup/setup.go +++ b/disk-mapper/internal/setup/setup.go @@ -17,6 +17,7 @@ import ( "errors" "fmt" "io/fs" + "log/slog" "net" "os" "path/filepath" @@ -31,10 +32,8 @@ import ( "github.com/edgelesssys/constellation/v2/internal/constants" "github.com/edgelesssys/constellation/v2/internal/crypto" "github.com/edgelesssys/constellation/v2/internal/file" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/internal/nodestate" "github.com/spf13/afero" - "go.uber.org/zap" ) const ( @@ -49,7 +48,7 @@ const ( // Manager handles formatting, mapping, mounting and unmounting of state disks. type Manager struct { - log *logger.Logger + log *slog.Logger csp string diskPath string fs afero.Afero @@ -60,7 +59,7 @@ type Manager struct { } // New initializes a SetupManager with the given parameters. -func New(log *logger.Logger, csp string, diskPath string, fs afero.Afero, +func New(log *slog.Logger, csp string, diskPath string, fs afero.Afero, mapper DeviceMapper, mounter Mounter, openDevice vtpm.TPMOpenFunc, ) *Manager { return &Manager{ @@ -82,7 +81,7 @@ func (s *Manager) PrepareExistingDisk(recover RecoveryDoer) error { if err != nil { return err } - s.log.With(zap.String("uuid", uuid)).Infof("Preparing existing state disk") + s.log.With(slog.String("uuid", uuid)).Info("Preparing existing state disk") endpoint := net.JoinHostPort("0.0.0.0", strconv.Itoa(constants.RecoveryPort)) passphrase, measurementSecret, err := recover.Do(uuid, endpoint) @@ -128,7 +127,7 @@ func (s *Manager) PrepareExistingDisk(recover RecoveryDoer) error { // PrepareNewDisk prepares an instances state disk by formatting the disk as a LUKS device using a random passphrase. 
 func (s *Manager) PrepareNewDisk() error {
 	uuid, _ := s.mapper.DiskUUID()
-	s.log.With(zap.String("uuid", uuid)).Infof("Preparing new state disk")
+	s.log.With(slog.String("uuid", uuid)).Info("Preparing new state disk")
 
 	// generate and save temporary passphrase
 	passphrase := make([]byte, crypto.RNGLengthDefault)
@@ -192,12 +191,12 @@ func (s *Manager) LogDevices() error {
 		devices = append(devices, fileInfo)
 	}
 
-	s.log.Infof("List of all available block devices and partitions:")
+	s.log.Info("List of all available block devices and partitions:")
 	for _, device := range devices {
 		var stat syscall.Statfs_t
 		dev := "/dev/" + device.Name()
 		if err := syscall.Statfs(dev, &stat); err != nil {
-			s.log.With(zap.Error(err)).Errorf("failed to statfs %s", dev)
+			s.log.With(slog.Any("error", err)).Error(fmt.Sprintf("failed to statfs %s", dev))
 			continue
 		}
 
@@ -206,7 +205,7 @@ func (s *Manager) LogDevices() error {
 		free := stat.Bfree * uint64(stat.Bsize)
 		avail := stat.Bavail * uint64(stat.Bsize)
 
-		s.log.Infof(
+		s.log.Info(fmt.Sprintf(
 			"Name: %-15s, Size: %-10d, Mode: %s, ModTime: %s, Size = %-10d, Free = %-10d, Available = %-10d\n",
 			dev,
 			device.Size(),
diff --git a/disk-mapper/internal/setup/setup_test.go b/disk-mapper/internal/setup/setup_test.go
index 1678b6bbfa..d8dd2ea08f 100644
--- a/disk-mapper/internal/setup/setup_test.go
+++ b/disk-mapper/internal/setup/setup_test.go
@@ -11,6 +11,7 @@ import (
 	"errors"
 	"io"
 	"io/fs"
+	"log/slog"
 	"net"
 	"path/filepath"
 	"sync"
@@ -136,7 +137,7 @@ func TestPrepareExistingDisk(t *testing.T) {
 			}
 
 			setupManager := &Manager{
-				log:       logger.NewTest(t),
+				log:       slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
 				csp:       "test",
 				diskPath:  "disk-path",
 				fs:        fs,
@@ -214,7 +215,7 @@ func TestPrepareNewDisk(t *testing.T) {
 			assert := assert.New(t)
 
 			setupManager := &Manager{
-				log:      logger.NewTest(t),
+				log:      slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)),
 				csp:      "test",
 				diskPath: "disk-path",
 				fs:       tc.fs,
@@ -270,7 +271,7 @@ func TestReadMeasurementSalt(t *testing.T) {
 				require.NoError(handler.WriteJSON("test-state.json", state, file.OptMkdirAll))
 			}
 
-			setupManager := New(logger.NewTest(t), "test", "disk-path", fs, nil, nil, nil)
+			setupManager := New(slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), "test", "disk-path", fs, nil, nil, nil)
 
 			measurementSalt, err := setupManager.readMeasurementSalt("test-state.json")
 			if tc.wantErr {
diff --git a/disk-mapper/internal/test/benchmark_test.go b/disk-mapper/internal/test/benchmark_test.go
index 6fc92a2846..d4a574a9df 100644
--- a/disk-mapper/internal/test/benchmark_test.go
+++ b/disk-mapper/internal/test/benchmark_test.go
@@ -12,11 +12,11 @@ import (
 	"fmt"
+	"log/slog"
 	"math"
+	"os"
 	"testing"
 
 	"github.com/edgelesssys/constellation/v2/disk-mapper/internal/diskencryption"
-	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/martinjungblut/go-cryptsetup"
-	"go.uber.org/zap/zapcore"
 )
 
 func BenchmarkMapper(b *testing.B) {
@@ -39,7 +38,7 @@ func BenchmarkMapper(b *testing.B) {
 	}
 
 	passphrase := "benchmark"
-	mapper, free, err := diskencryption.New(testPath, logger.New(logger.PlainLog, zapcore.InfoLevel))
+	mapper, free, err := diskencryption.New(testPath, slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelInfo})))
 	if err != nil {
 		b.Fatal("Failed to create mapper:", err)
 	}
diff --git a/disk-mapper/internal/test/integration_test.go b/disk-mapper/internal/test/integration_test.go
index a22faa28e7..9cfafe653d 100644
--- a/disk-mapper/internal/test/integration_test.go
+++ b/disk-mapper/internal/test/integration_test.go
@@ -18,11 +18,12 @@ import (
+	"log/slog"
 	"strings"
 	"syscall"
 	"testing"
 
 	"github.com/bazelbuild/rules_go/go/runfiles"
 	"github.com/edgelesssys/constellation/v2/disk-mapper/internal/diskencryption"
 	ccryptsetup "github.com/edgelesssys/constellation/v2/internal/cryptsetup"
 	"github.com/edgelesssys/constellation/v2/internal/logger"
 	cryptsetup "github.com/martinjungblut/go-cryptsetup"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -103,7 +103,7 @@ func TestMapper(t *testing.T) {
 	require.NoError(setup(1), "failed to setup test disk")
 	defer func() { require.NoError(teardown(), "failed to delete test disk") }()
 
-	mapper, free, err := diskencryption.New(devicePath, logger.NewTest(t))
+	mapper, free, err := diskencryption.New(devicePath, slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)))
 	require.NoError(err, "failed to initialize crypt device")
 	defer free()
diff --git a/e2e/internal/upgrade/helm.go b/e2e/internal/upgrade/helm.go
index b0fc498fdc..c86db9d25b 100644
--- a/e2e/internal/upgrade/helm.go
+++ b/e2e/internal/upgrade/helm.go
@@ -21,7 +21,7 @@ import (
 func servicesVersion(t *testing.T) (semver.Semver, error) {
 	t.Helper()
-	log := logger.NewTest(t)
+	log := slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil))
 	settings := cli.New()
 	settings.KubeConfig = "constellation-admin.conf"
 	actionConfig := &action.Configuration{}
diff --git a/e2e/malicious-join/malicious-join.go b/e2e/malicious-join/malicious-join.go
index a8894d0231..c1c7f58d28 100644
--- a/e2e/malicious-join/malicious-join.go
+++ b/e2e/malicious-join/malicious-join.go
@@ -12,21 +12,19 @@ import (
 	"encoding/json"
 	"flag"
 	"fmt"
+	"log/slog"
 	"net"
+	"os"
 	"strings"
 
 	"github.com/edgelesssys/constellation/v2/internal/attestation/variant"
 	"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
 	"github.com/edgelesssys/constellation/v2/internal/grpc/dialer"
-	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/joinservice/joinproto"
-	"go.uber.org/zap"
-	"go.uber.org/zap/zapcore"
 )
 
 func main() {
-	log := logger.New(logger.JSONLog, zapcore.DebugLevel)
-	defer log.Sync()
+	log := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug}))
 
 	jsEndpoint := flag.String("js-endpoint", "", "Join service endpoint to use.")
 	csp := flag.String("csp", "", "Cloud service provider to use.")
@@ -38,13 +36,13 @@ func main() {
 	)
 	flag.Parse()
 	log.With(
-		zap.String("js-endpoint", *jsEndpoint),
-		zap.String("csp", *csp),
-		zap.String("variant", *attVariant),
-	).Infof("Running tests with flags")
+		slog.String("js-endpoint", *jsEndpoint),
+		slog.String("csp", *csp),
+		slog.String("variant", *attVariant),
+	).Info("Running tests with flags")
 
 	testCases := map[string]struct {
-		fn      func(attVariant, csp, jsEndpoint string, log *logger.Logger) error
+		fn      func(attVariant, csp, jsEndpoint string, log *slog.Logger) error
 		wantErr bool
 	}{
 		"JoinFromUnattestedNode": {
@@ -58,44 +56,45 @@ func main() {
 		TestCases: make(map[string]testCaseOutput),
 	}
 	for name, tc := range testCases {
-		log.With(zap.String("testcase", name)).Infof("Running testcase")
+		log.With(slog.String("testcase", name)).Info("Running testcase")
 
 		err := tc.fn(*attVariant, *csp, *jsEndpoint, log)
 
 		switch {
 		case err == nil && tc.wantErr:
-			log.With(zap.Error(err), zap.String("testcase", name)).Errorf("Test case failed: Expected error but got none")
+			log.With(slog.String("testcase", name)).Error("Test case failed: Expected error but got none")
testOutput.TestCases[name] = testCaseOutput{ Passed: false, Message: "Expected error but got none", } allPassed = false case !tc.wantErr && err != nil: - log.With(zap.Error(err), zap.String("testcase", name)).Errorf("Test case failed: Got unexpected error") + log.With(slog.Any("error", err), slog.String("testcase", name)).Error("Test case failed: Got unexpected error") testOutput.TestCases[name] = testCaseOutput{ Passed: false, Message: fmt.Sprintf("Got unexpected error: %s", err), } allPassed = false case tc.wantErr && err != nil: - log.With(zap.String("testcase", name)).Infof("Test case succeeded") + log.With(slog.String("testcase", name)).Info("Test case succeeded") testOutput.TestCases[name] = testCaseOutput{ Passed: true, Message: fmt.Sprintf("Got expected error: %s", err), } case !tc.wantErr && err == nil: - log.With(zap.String("testcase", name)).Infof("Test case succeeded") + log.With(slog.String("testcase", name)).Info("Test case succeeded") testOutput.TestCases[name] = testCaseOutput{ Passed: true, Message: "No error, as expected", } default: - log.With(zap.String("testcase", name)).Fatalf("invalid result") + log.With(slog.String("testcase", name)).Error("invalid result") + os.Exit(1) } } testOutput.AllPassed = allPassed - log.With(zap.Any("result", testOutput)).Infof("Test completed") + log.With(slog.Any("result", testOutput)).Info("Test completed") } type testOutput struct { @@ -110,7 +109,7 @@ type testCaseOutput struct { // JoinFromUnattestedNode simulates a join request from a Node that uses a stub issuer // and thus cannot be attested correctly. -func JoinFromUnattestedNode(attVariant, csp, jsEndpoint string, log *logger.Logger) error { +func JoinFromUnattestedNode(attVariant, csp, jsEndpoint string, log *slog.Logger) error { joiner, err := newMaliciousJoiner(attVariant, csp, jsEndpoint, log) if err != nil { return fmt.Errorf("creating malicious joiner: %w", err) @@ -125,7 +124,7 @@ func JoinFromUnattestedNode(attVariant, csp, jsEndpoint string, log *logger.Logg // newMaliciousJoiner creates a new malicious joiner, i.e. a simulated node that issues // an invalid join request. -func newMaliciousJoiner(attVariant, csp, endpoint string, log *logger.Logger) (*maliciousJoiner, error) { +func newMaliciousJoiner(attVariant, csp, endpoint string, log *slog.Logger) (*maliciousJoiner, error) { var attVariantOid variant.Variant var err error if strings.EqualFold(attVariant, "default") { @@ -149,30 +148,30 @@ func newMaliciousJoiner(attVariant, csp, endpoint string, log *logger.Logger) (* // maliciousJoiner simulates a malicious node joining a cluster. type maliciousJoiner struct { endpoint string - logger *logger.Logger + logger *slog.Logger dialer *dialer.Dialer } // join issues a join request to the join service endpoint. 
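// slog does not expand printf verbs: extra arguments to Debug are parsed as
// alternating key/value pairs, so Debug("Dialing ... %s", j.endpoint, "")
// would print a literal "%s" and misread the endpoint as an attribute key.
// The calls in join below therefore pre-format the message; both idiomatic
// options are sketched here:
//
//    j.logger.Debug(fmt.Sprintf("Dialing join service endpoint %s", j.endpoint))
//    j.logger.Debug("Dialing join service endpoint", slog.String("endpoint", j.endpoint))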
 func (j *maliciousJoiner) join(ctx context.Context) (*joinproto.IssueJoinTicketResponse, error) {
-	j.logger.Debugf("Dialing join service endpoint %s", j.endpoint)
+	j.logger.Debug(fmt.Sprintf("Dialing join service endpoint %s", j.endpoint))
 	conn, err := j.dialer.Dial(ctx, j.endpoint)
 	if err != nil {
 		return nil, fmt.Errorf("dialing join service endpoint: %w", err)
 	}
 	defer conn.Close()
-	j.logger.Debugf("Successfully dialed join service endpoint %s", j.endpoint)
+	j.logger.Debug(fmt.Sprintf("Successfully dialed join service endpoint %s", j.endpoint))
 
 	protoClient := joinproto.NewAPIClient(conn)
 
-	j.logger.Debugf("Issuing join ticket")
+	j.logger.Debug("Issuing join ticket")
 	req := &joinproto.IssueJoinTicketRequest{
 		DiskUuid:           "",
 		CertificateRequest: []byte{},
 		IsControlPlane:     false,
 	}
 	res, err := protoClient.IssueJoinTicket(ctx, req)
-	j.logger.Debugf("Got join ticket response: %+v", res)
+	j.logger.Debug(fmt.Sprintf("Got join ticket response: %+v", res))
 	if err != nil {
 		return nil, fmt.Errorf("issuing join ticket: %w", err)
 	}
diff --git a/hack/bazel-deps-mirror/check.go b/hack/bazel-deps-mirror/check.go
index c8089ff737..d9b0213e4b 100644
--- a/hack/bazel-deps-mirror/check.go
+++ b/hack/bazel-deps-mirror/check.go
@@ -9,14 +9,14 @@ package main
 import (
 	"context"
 	"errors"
+	"log/slog"
+	"os"
 
 	"github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/bazelfiles"
 	"github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/issues"
 	"github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/mirror"
 	"github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/rules"
-	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/spf13/cobra"
-	"go.uber.org/zap/zapcore"
 )
 
 func newCheckCmd() *cobra.Command {
@@ -38,15 +38,15 @@ func runCheck(cmd *cobra.Command, _ []string) error {
 	if err != nil {
 		return err
 	}
-	log := logger.New(logger.PlainLog, flags.logLevel)
-	log.Debugf("Parsed flags: %+v", flags)
+	log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel}))
+	log.Debug("Parsed flags", slog.Any("flags", flags))
 
 	filesHelper, err := bazelfiles.New()
 	if err != nil {
 		return err
 	}
 
-	log.Debugf("Searching for Bazel files in the current WORKSPACE and all subdirectories...")
+	log.Debug("Searching for Bazel files in the current WORKSPACE and all subdirectories...")
 	bazelFiles, err := filesHelper.FindFiles()
 	if err != nil {
 		return err
@@ -55,10 +55,10 @@ func runCheck(cmd *cobra.Command, _ []string) error {
 	var mirrorCheck mirrorChecker
 	switch {
 	case flags.mirrorUnauthenticated:
-		log.Debugf("Checking consistency of all referenced CAS objects without authentication.")
+		log.Debug("Checking consistency of all referenced CAS objects without authentication.")
 		mirrorCheck = mirror.NewUnauthenticated(flags.mirrorBaseURL, mirror.Run, log)
 	case flags.mirror:
-		log.Debugf("Checking consistency of all referenced CAS objects using AWS S3.")
+		log.Debug("Checking consistency of all referenced CAS objects using AWS S3.")
 		mirrorCheck, err = mirror.New(cmd.Context(), flags.region, flags.bucket, flags.mirrorBaseURL, mirror.Run, log)
 		if err != nil {
 			return err
@@ -78,17 +78,17 @@ func runCheck(cmd *cobra.Command, _ []string) error {
 		}
 	}
 	if len(iss) > 0 {
-		log.Infof("Found issues in rules")
+		log.Info("Found issues in rules")
 		iss.Report(cmd.OutOrStdout())
 		return errors.New("found issues in rules")
 	}
-	log.Infof("No issues found 🦭")
+	log.Info("No issues found 🦭")
 	return nil
 }
 
-func checkBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper,
mirrorCheck mirrorChecker, bazelFile bazelfiles.BazelFile, log *logger.Logger) (issByFile issues.ByFile, err error) { - log.Debugf("Checking file: %s", bazelFile.RelPath) +func checkBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorCheck mirrorChecker, bazelFile bazelfiles.BazelFile, log *slog.Logger) (issByFile issues.ByFile, err error) { + log.Debug("Checking file: %s", bazelFile.RelPath) issByFile = issues.NewByFile() buildfile, err := fileHelper.LoadFile(bazelFile) if err != nil { @@ -96,12 +96,12 @@ func checkBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorCh } found := rules.Rules(buildfile, rules.SupportedRules) if len(found) == 0 { - log.Debugf("No rules found in file: %s", bazelFile.RelPath) + log.Debug("No rules found in file: %s", bazelFile.RelPath) return issByFile, nil } - log.Debugf("Found %d rules in file: %s", len(found), bazelFile.RelPath) + log.Debug("Found %d rules in file: %s", len(found), bazelFile.RelPath) for _, rule := range found { - log.Debugf("Checking rule: %s", rule.Name()) + log.Debug("Checking rule: %s", rule.Name()) // check if the rule is a valid pinned dependency rule (has all required attributes) if issues := rules.ValidatePinned(rule); len(issues) > 0 { issByFile.Add(rule.Name(), issues...) @@ -130,7 +130,7 @@ type checkFlags struct { region string bucket string mirrorBaseURL string - logLevel zapcore.Level + logLevel slog.Level } func parseCheckFlags(cmd *cobra.Command) (checkFlags, error) { @@ -146,9 +146,9 @@ func parseCheckFlags(cmd *cobra.Command) (checkFlags, error) { if err != nil { return checkFlags{}, err } - logLevel := zapcore.InfoLevel + logLevel := slog.LevelInfo if verbose { - logLevel = zapcore.DebugLevel + logLevel = slog.LevelDebug } region, err := cmd.Flags().GetString("region") if err != nil { diff --git a/hack/bazel-deps-mirror/fix.go b/hack/bazel-deps-mirror/fix.go index c40d42d055..6380e7e349 100644 --- a/hack/bazel-deps-mirror/fix.go +++ b/hack/bazel-deps-mirror/fix.go @@ -9,15 +9,15 @@ package main import ( "context" "errors" + "log/slog" + "os" "github.com/bazelbuild/buildtools/build" "github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/bazelfiles" "github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/issues" "github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/mirror" "github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/rules" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/spf13/cobra" - "go.uber.org/zap/zapcore" ) func newFixCmd() *cobra.Command { @@ -38,15 +38,15 @@ func runFix(cmd *cobra.Command, _ []string) error { if err != nil { return err } - log := logger.New(logger.PlainLog, flags.logLevel) - log.Debugf("Parsed flags: %+v", flags) + log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel})) + log.Debug("Parsed flags: %+v", flags) fileHelper, err := bazelfiles.New() if err != nil { return err } - log.Debugf("Searching for Bazel files in the current WORKSPACE and all subdirectories...") + log.Debug("Searching for Bazel files in the current WORKSPACE and all subdirectories...") bazelFiles, err := fileHelper.FindFiles() if err != nil { return err @@ -55,10 +55,10 @@ func runFix(cmd *cobra.Command, _ []string) error { var mirrorUpload mirrorUploader switch { case flags.unauthenticated: - log.Warnf("Fixing rules without authentication for AWS S3. 
If artifacts are not yet mirrored, this will fail.") + log.Warn("Fixing rules without authentication for AWS S3. If artifacts are not yet mirrored, this will fail.") mirrorUpload = mirror.NewUnauthenticated(flags.mirrorBaseURL, flags.dryRun, log) default: - log.Debugf("Fixing rules with authentication for AWS S3.") + log.Debug("Fixing rules with authentication for AWS S3.") mirrorUpload, err = mirror.New(cmd.Context(), flags.region, flags.bucket, flags.mirrorBaseURL, flags.dryRun, log) if err != nil { return err @@ -76,29 +76,29 @@ func runFix(cmd *cobra.Command, _ []string) error { } } if len(issues) > 0 { - log.Warnf("Found %d unfixable issues in rules", len(issues)) + log.Warn("Found %d unfixable issues in rules", len(issues)) issues.Report(cmd.OutOrStdout()) return errors.New("found issues in rules") } - log.Infof("No unfixable issues found") + log.Info("No unfixable issues found") return nil } -func fixBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUpload mirrorUploader, bazelFile bazelfiles.BazelFile, dryRun bool, log *logger.Logger) (iss issues.ByFile, err error) { +func fixBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUpload mirrorUploader, bazelFile bazelfiles.BazelFile, dryRun bool, log *slog.Logger) (iss issues.ByFile, err error) { iss = issues.NewByFile() var changed bool // true if any rule in this file was changed - log.Infof("Checking file: %s", bazelFile.RelPath) + log.Info("Checking file: %s", bazelFile.RelPath) buildfile, err := fileHelper.LoadFile(bazelFile) if err != nil { return iss, err } found := rules.Rules(buildfile, rules.SupportedRules) if len(found) == 0 { - log.Debugf("No rules found in file: %s", bazelFile.RelPath) + log.Debug("No rules found in file: %s", bazelFile.RelPath) return iss, nil } - log.Debugf("Found %d rules in file: %s", len(found), bazelFile.RelPath) + log.Debug("Found %d rules in file: %s", len(found), bazelFile.RelPath) for _, rule := range found { changedRule, ruleIssues := fixRule(ctx, mirrorUpload, rule, log) if len(ruleIssues) > 0 { @@ -108,11 +108,11 @@ func fixBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUplo } if len(iss) > 0 { - log.Warnf("File %s has issues. Not saving!", bazelFile.RelPath) + log.Warn("File %s has issues. 
Not saving!", bazelFile.RelPath) return iss, nil } if !changed { - log.Debugf("No changes to file: %s", bazelFile.RelPath) + log.Debug("No changes to file: %s", bazelFile.RelPath) return iss, nil } if dryRun { @@ -120,10 +120,10 @@ func fixBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUplo if err != nil { return iss, err } - log.Infof("Dry run: would save updated file %s with diff:\n%s", bazelFile.RelPath, diff) + log.Info("Dry run: would save updated file %s with diff:\n%s", bazelFile.RelPath, diff) return iss, nil } - log.Infof("Saving updated file: %s", bazelFile.RelPath) + log.Info("Saving updated file: %s", bazelFile.RelPath) if err := fileHelper.WriteFile(bazelFile, buildfile); err != nil { return iss, err } @@ -131,7 +131,7 @@ func fixBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUplo return iss, nil } -func learnHashForRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, log *logger.Logger) error { +func learnHashForRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, log *slog.Logger) error { upstreamURLs, err := rules.UpstreamURLs(rule) if err != nil { return err @@ -141,12 +141,12 @@ func learnHashForRule(ctx context.Context, mirrorUpload mirrorUploader, rule *bu return err } rules.SetHash(rule, learnedHash) - log.Debugf("Learned hash for rule %s: %s", rule.Name(), learnedHash) + log.Debug("Learned hash for rule %s: %s", rule.Name(), learnedHash) return nil } -func fixRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, log *logger.Logger) (changed bool, iss []error) { - log.Debugf("Fixing rule: %s", rule.Name()) +func fixRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, log *slog.Logger) (changed bool, iss []error) { + log.Debug("Fixing rule: %s", rule.Name()) // try to learn the hash if hash, err := rules.GetHash(rule); err != nil || hash == "" { @@ -182,14 +182,14 @@ func fixRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, } if checkErr := mirrorUpload.Check(ctx, expectedHash); checkErr != nil { - log.Infof("Artifact %s with hash %s is not yet mirrored. Uploading...", rule.Name(), expectedHash) + log.Info("Artifact %s with hash %s is not yet mirrored. Uploading...", rule.Name(), expectedHash) if uploadErr := mirrorUpload.Mirror(ctx, expectedHash, rules.GetURLs(rule)); uploadErr != nil { // don't try to fix the rule if the upload failed iss = append(iss, uploadErr) return changed, iss } } else { - log.Infof("Artifact %s with hash %s was already uploaded before. Adding to rule...", rule.Name(), expectedHash) + log.Info("Artifact %s with hash %s was already uploaded before. 
Adding to rule...", rule.Name(), expectedHash) } // now the artifact is mirrored (if it wasn't already) and we can fix the rule @@ -211,7 +211,7 @@ type fixFlags struct { region string bucket string mirrorBaseURL string - logLevel zapcore.Level + logLevel slog.Level } func parseFixFlags(cmd *cobra.Command) (fixFlags, error) { @@ -227,9 +227,9 @@ func parseFixFlags(cmd *cobra.Command) (fixFlags, error) { if err != nil { return fixFlags{}, err } - logLevel := zapcore.InfoLevel + logLevel := slog.LevelInfo if verbose { - logLevel = zapcore.DebugLevel + logLevel = slog.LevelDebug } region, err := cmd.Flags().GetString("region") if err != nil { diff --git a/hack/bazel-deps-mirror/internal/mirror/mirror_test.go b/hack/bazel-deps-mirror/internal/mirror/mirror_test.go index 541c1fa52c..829c043035 100644 --- a/hack/bazel-deps-mirror/internal/mirror/mirror_test.go +++ b/hack/bazel-deps-mirror/internal/mirror/mirror_test.go @@ -135,7 +135,7 @@ func TestMirror(t *testing.T) { }(), }, unauthenticated: tc.unauthenticated, - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), } err := m.Mirror(context.Background(), tc.hash, []string{tc.upstreamURL}) if tc.wantErr { @@ -178,7 +178,7 @@ func TestLearn(t *testing.T) { body: tc.upstreamResponse, }, }, - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), } gotHash, err := m.Learn(context.Background(), []string{"https://example.com/foo"}) if tc.wantErr { @@ -272,7 +272,7 @@ func TestCheck(t *testing.T) { response: tc.authenticatedResponse, err: tc.authenticatedErr, }, - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), } err := m.Check(context.Background(), tc.hash) if tc.wantErr { diff --git a/hack/bazel-deps-mirror/upgrade.go b/hack/bazel-deps-mirror/upgrade.go index 8e2af75f03..21094553c9 100644 --- a/hack/bazel-deps-mirror/upgrade.go +++ b/hack/bazel-deps-mirror/upgrade.go @@ -9,15 +9,15 @@ package main import ( "context" "errors" + "log/slog" + "os" "github.com/bazelbuild/buildtools/build" "github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/bazelfiles" "github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/issues" "github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/mirror" "github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/rules" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/spf13/cobra" - "go.uber.org/zap/zapcore" ) func newUpgradeCmd() *cobra.Command { @@ -38,15 +38,15 @@ func runUpgrade(cmd *cobra.Command, _ []string) error { if err != nil { return err } - log := logger.New(logger.PlainLog, flags.logLevel) - log.Debugf("Parsed flags: %+v", flags) + log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel})) + log.Debug("Parsed flags: %+v", flags) fileHelper, err := bazelfiles.New() if err != nil { return err } - log.Debugf("Searching for Bazel files in the current WORKSPACE and all subdirectories...") + log.Debug("Searching for Bazel files in the current WORKSPACE and all subdirectories...") bazelFiles, err := fileHelper.FindFiles() if err != nil { return err @@ -55,10 +55,10 @@ func runUpgrade(cmd *cobra.Command, _ []string) error { var mirrorUpload mirrorUploader switch { case flags.unauthenticated: - log.Warnf("Upgrading rules without authentication for AWS S3. If artifacts are not yet mirrored, this will fail.") + log.Warn("Upgrading rules without authentication for AWS S3. 
If artifacts are not yet mirrored, this will fail.") mirrorUpload = mirror.NewUnauthenticated(flags.mirrorBaseURL, flags.dryRun, log) default: - log.Debugf("Upgrading rules with authentication for AWS S3.") + log.Debug("Upgrading rules with authentication for AWS S3.") mirrorUpload, err = mirror.New(cmd.Context(), flags.region, flags.bucket, flags.mirrorBaseURL, flags.dryRun, log) if err != nil { return err @@ -76,29 +76,29 @@ func runUpgrade(cmd *cobra.Command, _ []string) error { } } if len(issues) > 0 { - log.Warnf("Found %d issues in rules", len(issues)) + log.Warn("Found %d issues in rules", len(issues)) issues.Report(cmd.OutOrStdout()) return errors.New("found issues in rules") } - log.Infof("No issues found") + log.Info("No issues found") return nil } -func upgradeBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUpload mirrorUploader, bazelFile bazelfiles.BazelFile, dryRun bool, log *logger.Logger) (iss issues.ByFile, err error) { +func upgradeBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUpload mirrorUploader, bazelFile bazelfiles.BazelFile, dryRun bool, log *slog.Logger) (iss issues.ByFile, err error) { iss = issues.NewByFile() var changed bool // true if any rule in this file was changed - log.Infof("Checking file: %s", bazelFile.RelPath) + log.Info("Checking file: %s", bazelFile.RelPath) buildfile, err := fileHelper.LoadFile(bazelFile) if err != nil { return iss, err } found := rules.Rules(buildfile, rules.SupportedRules) if len(found) == 0 { - log.Debugf("No rules found in file: %s", bazelFile.RelPath) + log.Debug("No rules found in file: %s", bazelFile.RelPath) return iss, nil } - log.Debugf("Found %d rules in file: %s", len(found), bazelFile.RelPath) + log.Debug("Found %d rules in file: %s", len(found), bazelFile.RelPath) for _, rule := range found { changedRule, ruleIssues := upgradeRule(ctx, mirrorUpload, rule, log) if len(ruleIssues) > 0 { @@ -108,11 +108,11 @@ func upgradeBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirror } if len(iss) > 0 { - log.Warnf("File %s has issues. Not saving!", bazelFile.RelPath) + log.Warn("File %s has issues. Not saving!", bazelFile.RelPath) return iss, nil } if !changed { - log.Debugf("No changes to file: %s", bazelFile.RelPath) + log.Debug("No changes to file: %s", bazelFile.RelPath) return iss, nil } if dryRun { @@ -120,10 +120,10 @@ func upgradeBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirror if err != nil { return iss, err } - log.Infof("Dry run: would save updated file %s with diff:\n%s", bazelFile.RelPath, diff) + log.Info("Dry run: would save updated file %s with diff:\n%s", bazelFile.RelPath, diff) return iss, nil } - log.Infof("Saving updated file: %s", bazelFile.RelPath) + log.Info("Saving updated file: %s", bazelFile.RelPath) if err := fileHelper.WriteFile(bazelFile, buildfile); err != nil { return iss, err } @@ -131,12 +131,12 @@ func upgradeBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirror return iss, nil } -func upgradeRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, log *logger.Logger) (changed bool, iss []error) { - log.Debugf("Upgrading rule: %s", rule.Name()) +func upgradeRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, log *slog.Logger) (changed bool, iss []error) { + log.Debug("Upgrading rule: %s", rule.Name()) upstreamURLs, err := rules.UpstreamURLs(rule) if errors.Is(err, rules.ErrNoUpstreamURL) { - log.Debugf("Rule has no upstream URL. 
Skipping.") + log.Debug("Rule has no upstream URL. Skipping.") return false, nil } else if err != nil { iss = append(iss, err) @@ -152,7 +152,7 @@ func upgradeRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.R existingHash, err := rules.GetHash(rule) if err == nil && learnedHash == existingHash { - log.Debugf("Rule already upgraded. Skipping.") + log.Debug("Rule already upgraded. Skipping.") return false, nil } @@ -177,7 +177,7 @@ type upgradeFlags struct { region string bucket string mirrorBaseURL string - logLevel zapcore.Level + logLevel slog.Level } func parseUpgradeFlags(cmd *cobra.Command) (upgradeFlags, error) { @@ -193,9 +193,9 @@ func parseUpgradeFlags(cmd *cobra.Command) (upgradeFlags, error) { if err != nil { return upgradeFlags{}, err } - logLevel := zapcore.InfoLevel + logLevel := slog.LevelInfo if verbose { - logLevel = zapcore.DebugLevel + logLevel = slog.LevelDebug } region, err := cmd.Flags().GetString("region") if err != nil { diff --git a/hack/cli-k8s-compatibility/main.go b/hack/cli-k8s-compatibility/main.go index 616c6fd984..d72f9cf53d 100644 --- a/hack/cli-k8s-compatibility/main.go +++ b/hack/cli-k8s-compatibility/main.go @@ -8,8 +8,10 @@ SPDX-License-Identifier: AGPL-3.0-only package main import ( - "context" - "flag" + "context" + "flag" + "log/slog" + "os" "github.com/edgelesssys/constellation/v2/internal/api/versionsapi" "github.com/edgelesssys/constellation/v2/internal/constants" @@ -19,36 +21,39 @@ import ( ) var ( - refFlag = flag.String("ref", "", "the reference name of the image") - streamFlag = flag.String("stream", "", "the stream name of the image") - versionFlag = flag.String("version", "", "the version of the image") + refFlag = flag.String("ref", "", "the reference name of the image") + streamFlag = flag.String("stream", "", "the stream name of the image") + versionFlag = flag.String("version", "", "the version of the image") ) func main() { - log := logger.New(logger.PlainLog, zapcore.DebugLevel) - ctx := context.Background() + log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug})) + ctx := context.Background() - flag.Parse() - if *refFlag == "" { - log.Fatalf("ref must be set") - } - if *streamFlag == "" { - log.Fatalf("stream must be set") - } - if *versionFlag == "" { - log.Fatalf("version must be set") - } + flag.Parse() + if *refFlag == "" { + log.Error("ref must be set") + os.Exit(1) + } + if *streamFlag == "" { + log.Error("stream must be set") + os.Exit(1) + } + if *versionFlag == "" { + log.Error("version must be set") + os.Exit(1) + } - cliInfo := versionsapi.CLIInfo{ - Ref: *refFlag, - Stream: *streamFlag, - Version: *versionFlag, - Kubernetes: []string{}, - } + cliInfo := versionsapi.CLIInfo{ + Ref: *refFlag, + Stream: *streamFlag, + Version: *versionFlag, + Kubernetes: []string{}, + } - for _, v := range versions.VersionConfigs { - cliInfo.Kubernetes = append(cliInfo.Kubernetes, v.ClusterVersion) - } + for _, v := range versions.VersionConfigs { + cliInfo.Kubernetes = append(cliInfo.Kubernetes, v.ClusterVersion) + } c, cclose, err := versionsapi.NewClient(ctx, "eu-central-1", "cdn-constellation-backend", constants.CDNDefaultDistributionID, false, log) if err != nil { @@ -60,7 +65,8 @@ func main() { } }() - if err := c.UpdateCLIInfo(ctx, cliInfo); err != nil { - log.Fatalf("updating cli info: %w", err) - } + if err := c.UpdateCLIInfo(ctx, cliInfo); err != nil { + log.Error("updating cli info: %w", err) + os.Exit(1) + } } diff --git a/hack/oci-pin/codegen.go b/hack/oci-pin/codegen.go 
index 4c8f9fafc7..ed6c927b5e 100644 --- a/hack/oci-pin/codegen.go +++ b/hack/oci-pin/codegen.go @@ -8,15 +8,14 @@ package main import ( "fmt" "io" + "log/slog" "os" "path/filepath" "strings" "github.com/edgelesssys/constellation/v2/hack/oci-pin/internal/extract" "github.com/edgelesssys/constellation/v2/hack/oci-pin/internal/inject" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/spf13/cobra" - "go.uber.org/zap/zapcore" ) func newCodegenCmd() *cobra.Command { @@ -44,15 +43,15 @@ func runCodegen(cmd *cobra.Command, _ []string) error { if err != nil { return err } - log := logger.New(logger.PlainLog, flags.logLevel) - log.Debugf("Parsed flags: %+v", flags) + log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel})) + log.Debug("Parsed flags: %+v", flags) registry, prefix, name, tag, err := splitRepoTag(flags.imageRepoTag) if err != nil { return fmt.Errorf("splitting OCI image reference %q: %w", flags.imageRepoTag, err) } - log.Debugf("Generating Go code for OCI image %s.", name) + log.Debug("Generating Go code for OCI image %s.", name) ociIndexPath := filepath.Join(flags.ociPath, "index.json") index, err := os.Open(ociIndexPath) @@ -78,7 +77,7 @@ func runCodegen(cmd *cobra.Command, _ []string) error { return err } - log.Debugf("OCI image digest: %s", digest) + log.Debug("OCI image digest: %s", digest) if err := inject.Render(out, inject.PinningValues{ Package: flags.pkg, @@ -92,7 +91,7 @@ func runCodegen(cmd *cobra.Command, _ []string) error { return fmt.Errorf("rendering Go code: %w", err) } - log.Debugf("Go code created at %q 🤖", flags.output) + log.Debug("Go code created at %q 🤖", flags.output) return nil } @@ -102,7 +101,7 @@ type codegenFlags struct { pkg string identifier string imageRepoTag string - logLevel zapcore.Level + logLevel slog.Level } func parseCodegenFlags(cmd *cobra.Command) (codegenFlags, error) { @@ -137,9 +136,9 @@ func parseCodegenFlags(cmd *cobra.Command) (codegenFlags, error) { if err != nil { return codegenFlags{}, err } - logLevel := zapcore.InfoLevel + logLevel := slog.LevelInfo if verbose { - logLevel = zapcore.DebugLevel + logLevel = slog.LevelDebug } return codegenFlags{ diff --git a/hack/oci-pin/merge.go b/hack/oci-pin/merge.go index 081ea6a6f2..281d4ce197 100644 --- a/hack/oci-pin/merge.go +++ b/hack/oci-pin/merge.go @@ -8,12 +8,11 @@ package main import ( "fmt" "io" + "log/slog" "os" "github.com/edgelesssys/constellation/v2/hack/oci-pin/internal/sums" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/spf13/cobra" - "go.uber.org/zap/zapcore" ) func newMergeCmd() *cobra.Command { @@ -35,10 +34,10 @@ func runMerge(cmd *cobra.Command, _ []string) error { if err != nil { return err } - log := logger.New(logger.PlainLog, flags.logLevel) - log.Debugf("Parsed flags: %+v", flags) + log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel})) + log.Debug("Parsed flags: %+v", flags) - log.Debugf("Merging sum file from %q into %q.", flags.inputs, flags.output) + log.Debug("Merging sum file from %q into %q.", flags.inputs, flags.output) var out io.Writer if flags.output == "-" { @@ -61,7 +60,7 @@ func runMerge(cmd *cobra.Command, _ []string) error { return fmt.Errorf("creating merged sum file: %w", err) } - log.Debugf("Sum file created at %q 🤖", flags.output) + log.Debug("Sum file created at %q 🤖", flags.output) return nil } @@ -93,7 +92,7 @@ func parseInput(input string) ([]sums.PinnedImageReference, error) { type mergeFlags struct { inputs []string output 
string - logLevel zapcore.Level + logLevel slog.Level } func parseMergeFlags(cmd *cobra.Command) (mergeFlags, error) { @@ -109,9 +108,9 @@ func parseMergeFlags(cmd *cobra.Command) (mergeFlags, error) { if err != nil { return mergeFlags{}, err } - logLevel := zapcore.InfoLevel + logLevel := slog.LevelInfo if verbose { - logLevel = zapcore.DebugLevel + logLevel = slog.LevelDebug } return mergeFlags{ diff --git a/hack/oci-pin/sum.go b/hack/oci-pin/sum.go index 2268ae3587..0eca8f9e03 100644 --- a/hack/oci-pin/sum.go +++ b/hack/oci-pin/sum.go @@ -8,15 +8,14 @@ package main import ( "fmt" "io" + "log/slog" "os" "path/filepath" "strings" "github.com/edgelesssys/constellation/v2/hack/oci-pin/internal/extract" "github.com/edgelesssys/constellation/v2/hack/oci-pin/internal/sums" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/spf13/cobra" - "go.uber.org/zap/zapcore" ) func newSumCmd() *cobra.Command { @@ -41,15 +40,15 @@ func runSum(cmd *cobra.Command, _ []string) error { if err != nil { return err } - log := logger.New(logger.PlainLog, flags.logLevel) - log.Debugf("Parsed flags: %+v", flags) + log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel})) + log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) registry, prefix, name, tag, err := splitRepoTag(flags.imageRepoTag) if err != nil { return fmt.Errorf("splitting repo tag: %w", err) } - log.Debugf("Generating sum file for OCI image %s.", name) + log.Debug(fmt.Sprintf("Generating sum file for OCI image %s.", name)) ociIndexPath := filepath.Join(flags.ociPath, "index.json") index, err := os.Open(ociIndexPath) @@ -75,7 +74,7 @@ func runSum(cmd *cobra.Command, _ []string) error { return fmt.Errorf("extracting OCI image digest: %w", err) } - log.Debugf("OCI image digest: %s", digest) + log.Debug(fmt.Sprintf("OCI image digest: %s", digest)) refs := []sums.PinnedImageReference{ { @@ -91,7 +90,7 @@ func runSum(cmd *cobra.Command, _ []string) error { return fmt.Errorf("creating sum file: %w", err) } - log.Debugf("Sum file created at %q 🤖", flags.output) + log.Debug(fmt.Sprintf("Sum file created at %q 🤖", flags.output)) return nil } @@ -99,7 +98,7 @@ type sumFlags struct { ociPath string output string imageRepoTag string - logLevel zapcore.Level + logLevel slog.Level } func parseSumFlags(cmd *cobra.Command) (sumFlags, error) { @@ -126,9 +125,9 @@ func parseSumFlags(cmd *cobra.Command) (sumFlags, error) { if err != nil { return sumFlags{}, err } - logLevel := zapcore.InfoLevel + logLevel := slog.LevelInfo if verbose { - logLevel = zapcore.DebugLevel + logLevel = slog.LevelDebug } return sumFlags{ diff --git a/hack/qemu-metadata-api/main.go b/hack/qemu-metadata-api/main.go index bec42e2c7c..0cbc0213cf 100644 --- a/hack/qemu-metadata-api/main.go +++ b/hack/qemu-metadata-api/main.go @@ -9,33 +9,34 @@ SPDX-License-Identifier: AGPL-3.0-only package main import ( - "flag" - - "github.com/edgelesssys/constellation/v2/hack/qemu-metadata-api/server" - "github.com/edgelesssys/constellation/v2/hack/qemu-metadata-api/virtwrapper" - "github.com/edgelesssys/constellation/v2/internal/logger" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" - "libvirt.org/go/libvirt" + "flag" + "log/slog" + "os" + + "github.com/edgelesssys/constellation/v2/hack/qemu-metadata-api/server" + "github.com/edgelesssys/constellation/v2/hack/qemu-metadata-api/virtwrapper" + "libvirt.org/go/libvirt" ) func main() { - bindPort := flag.String("port", "8080", "Port to bind to") - targetNetwork := flag.String("network", "constellation-network", 
"Name of the network in QEMU to use") - libvirtURI := flag.String("libvirt-uri", "qemu:///system", "URI of the libvirt connection") - initSecretHash := flag.String("initsecrethash", "", "brcypt hash of the init secret") - flag.Parse() - - log := logger.New(logger.JSONLog, zapcore.InfoLevel) - - conn, err := libvirt.NewConnect(*libvirtURI) - if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to connect to libvirt") - } - defer conn.Close() - - serv := server.New(log, *targetNetwork, *initSecretHash, &virtwrapper.Connect{Conn: conn}) - if err := serv.ListenAndServe(*bindPort); err != nil { - log.With(zap.Error(err)).Fatalf("Failed to serve") - } + bindPort := flag.String("port", "8080", "Port to bind to") + targetNetwork := flag.String("network", "constellation-network", "Name of the network in QEMU to use") + libvirtURI := flag.String("libvirt-uri", "qemu:///system", "URI of the libvirt connection") + initSecretHash := flag.String("initsecrethash", "", "brcypt hash of the init secret") + flag.Parse() + + log := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelInfo})) + + conn, err := libvirt.NewConnect(*libvirtURI) + if err != nil { + log.With(slog.Any("error", err)).Error("Failed to connect to libvirt") + os.Exit(1) + } + defer conn.Close() + + serv := server.New(log, *targetNetwork, *initSecretHash, &virtwrapper.Connect{Conn: conn}) + if err := serv.ListenAndServe(*bindPort); err != nil { + log.With(slog.Any("error", err)).Error("Failed to serve") + os.Exit(1) + } } diff --git a/hack/qemu-metadata-api/server/server.go b/hack/qemu-metadata-api/server/server.go index 93d6f3a044..61c524d03b 100644 --- a/hack/qemu-metadata-api/server/server.go +++ b/hack/qemu-metadata-api/server/server.go @@ -9,27 +9,27 @@ package server import ( "encoding/json" "fmt" + "io" + "log/slog" "net" "net/http" "strings" "github.com/edgelesssys/constellation/v2/hack/qemu-metadata-api/virtwrapper" "github.com/edgelesssys/constellation/v2/internal/cloud/metadata" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/internal/role" - "go.uber.org/zap" ) // Server that provides QEMU metadata. type Server struct { - log *logger.Logger + log *slog.Logger virt virConnect network string initSecretHashVal []byte } // New creates a new Server. -func New(log *logger.Logger, network, initSecretHash string, conn virConnect) *Server { +func New(log *slog.Logger, network, initSecretHash string, conn virConnect) *Server { return &Server{ log: log, virt: conn, @@ -55,25 +55,25 @@ func (s *Server) ListenAndServe(port string) error { return err } - s.log.Infof("Starting QEMU metadata API on %s", lis.Addr()) + s.log.Info("Starting QEMU metadata API on %s", lis.Addr()) return server.Serve(lis) } // listSelf returns peer information about the instance issuing the request. 
func (s *Server) listSelf(w http.ResponseWriter, r *http.Request) { - log := s.log.With(zap.String("peer", r.RemoteAddr)) - log.Infof("Serving GET request for /self") + log := s.log.With(slog.String("peer", r.RemoteAddr)) + log.Info("Serving GET request for /self") remoteIP, _, err := net.SplitHostPort(r.RemoteAddr) if err != nil { - log.With(zap.Error(err)).Errorf("Failed to parse remote address") + log.With(slog.Any("error", err)).Error("Failed to parse remote address") http.Error(w, fmt.Sprintf("Failed to parse remote address: %s\n", err), http.StatusInternalServerError) return } peers, err := s.listAll() if err != nil { - log.With(zap.Error(err)).Errorf("Failed to list peer metadata") + log.With(slog.Any("error", err)).Error("Failed to list peer metadata") http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -85,23 +85,23 @@ func (s *Server) listSelf(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusInternalServerError) return } - log.Infof("Request successful") + log.Info("Request successful") return } } - log.Errorf("Failed to find peer in active leases") + log.Error("Failed to find peer in active leases") http.Error(w, "No matching peer found", http.StatusNotFound) } // listPeers returns a list of all active peers. func (s *Server) listPeers(w http.ResponseWriter, r *http.Request) { - log := s.log.With(zap.String("peer", r.RemoteAddr)) - log.Infof("Serving GET request for /peers") + log := s.log.With(slog.String("peer", r.RemoteAddr)) + log.Info("Serving GET request for /peers") peers, err := s.listAll() if err != nil { - log.With(zap.Error(err)).Errorf("Failed to list peer metadata") + log.With(slog.Any("error", err)).Error("Failed to list peer metadata") http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -111,38 +111,38 @@ func (s *Server) listPeers(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusInternalServerError) return } - log.Infof("Request successful") + log.Info("Request successful") } // initSecretHash returns the hash of the init secret. func (s *Server) initSecretHash(w http.ResponseWriter, r *http.Request) { - log := s.log.With(zap.String("initSecretHash", r.RemoteAddr)) + log := s.log.With(slog.String("initSecretHash", r.RemoteAddr)) if r.Method != http.MethodGet { - log.With(zap.String("method", r.Method)).Errorf("Invalid method for /initSecretHash") + log.With(slog.String("method", r.Method)).Error("Invalid method for /initSecretHash") http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) return } - log.Infof("Serving GET request for /initsecrethash") + log.Info("Serving GET request for /initsecrethash") w.Header().Set("Content-Type", "text/plain") _, err := w.Write(s.initSecretHashVal) if err != nil { - log.With(zap.Error(err)).Errorf("Failed to write init secret hash") + log.With(slog.Any("error", err)).Error("Failed to write init secret hash") http.Error(w, err.Error(), http.StatusInternalServerError) return } - log.Infof("Request successful") + log.Info("Request successful") } // getEndpoint returns the IP address of the first control-plane instance. // This allows us to fake a load balancer for QEMU instances. 
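// Each handler in this file derives a request-scoped logger first, so every
// message automatically carries the peer address. The same pattern extends to
// any per-request attribute (sketch; the request_id field is hypothetical):
//
//    log := s.log.With(
//        slog.String("peer", r.RemoteAddr),
//        slog.String("request_id", requestID), // hypothetical correlation ID
//    )
//    log.Info("Serving GET request for /self")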
func (s *Server) getEndpoint(w http.ResponseWriter, r *http.Request) { - log := s.log.With(zap.String("peer", r.RemoteAddr)) - log.Infof("Serving GET request for /endpoint") + log := s.log.With(slog.String("peer", r.RemoteAddr)) + log.Info("Serving GET request for /endpoint") net, err := s.virt.LookupNetworkByName(s.network) if err != nil { - log.With(zap.Error(err)).Errorf("Failed to lookup network") + log.With(slog.Any("error", err)).Error("Failed to lookup network") http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -150,7 +150,7 @@ func (s *Server) getEndpoint(w http.ResponseWriter, r *http.Request) { leases, err := net.GetDHCPLeases() if err != nil { - log.With(zap.Error(err)).Errorf("Failed to get DHCP leases") + log.With(slog.Any("error", err)).Error("Failed to get DHCP leases") http.Error(w, err.Error(), http.StatusInternalServerError) } @@ -162,12 +162,12 @@ func (s *Server) getEndpoint(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusInternalServerError) return } - log.Infof("Request successful") + log.Info("Request successful") return } } - log.Errorf("Failed to find control-plane peer in active leases") + log.Error("Failed to find control-plane peer in active leases") http.Error(w, "No matching peer found", http.StatusNotFound) } diff --git a/hack/qemu-metadata-api/server/server_test.go b/hack/qemu-metadata-api/server/server_test.go index 3b04d214d8..71674ac2c9 100644 --- a/hack/qemu-metadata-api/server/server_test.go +++ b/hack/qemu-metadata-api/server/server_test.go @@ -11,6 +11,7 @@ import ( "encoding/json" "errors" "io" + "log/slog" "net/http" "net/http/httptest" "testing" @@ -67,7 +68,7 @@ func TestListAll(t *testing.T) { t.Run(name, func(t *testing.T) { assert := assert.New(t) - server := New(logger.NewTest(t), "test", "initSecretHash", tc.connect) + server := New(slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), "test", "initSecretHash", tc.connect) res, err := server.listAll() @@ -138,7 +139,7 @@ func TestListSelf(t *testing.T) { assert := assert.New(t) require := require.New(t) - server := New(logger.NewTest(t), "test", "initSecretHash", tc.connect) + server := New(slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), "test", "initSecretHash", tc.connect) req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, "http://192.0.0.1/self", nil) require.NoError(err) @@ -198,7 +199,7 @@ func TestListPeers(t *testing.T) { assert := assert.New(t) require := require.New(t) - server := New(logger.NewTest(t), "test", "initSecretHash", tc.connect) + server := New(slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), "test", "initSecretHash", tc.connect) req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, "http://192.0.0.1/peers", nil) require.NoError(err) @@ -253,7 +254,7 @@ func TestInitSecretHash(t *testing.T) { assert := assert.New(t) require := require.New(t) - server := New(logger.NewTest(t), "test", tc.wantHash, defaultConnect) + server := New(slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), "test", tc.wantHash, defaultConnect) req, err := http.NewRequestWithContext(context.Background(), tc.method, "http://192.0.0.1/initsecrethash", nil) require.NoError(err) diff --git a/image/upload/internal/cmd/aws.go b/image/upload/internal/cmd/aws.go new file mode 100644 index 0000000000..1d196ce84b --- /dev/null +++ b/image/upload/internal/cmd/aws.go @@ -0,0 +1,106 @@ +/* +Copyright (c) Edgeless Systems GmbH + +SPDX-License-Identifier: AGPL-3.0-only +*/ + 
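// aws.go below, plus azure.go and gcp.go, each register one provider command
// with the same RunE skeleton: parse flags, build a slog logger, open the raw
// image, and hand off to uploadImage. Hypothetical wiring in the root command
// (the root command itself is not part of this patch excerpt):
//
//    rootCmd.AddCommand(newAWSCmd(), newAzureCmd(), newGCPCommand())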
+package cmd + +import ( + "fmt" + "io" + "log/slog" + "os" + + "github.com/edgelesssys/constellation/v2/internal/osimage" + "github.com/edgelesssys/constellation/v2/internal/osimage/archive" + awsupload "github.com/edgelesssys/constellation/v2/internal/osimage/aws" + "github.com/spf13/cobra" +) + +// newAWSCmd returns the command that uploads an OS image to AWS. +func newAWSCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "aws", + Short: "Upload OS image to AWS", + Long: "Upload OS image to AWS.", + Args: cobra.ExactArgs(0), + RunE: runAWS, + } + + cmd.Flags().String("aws-region", "eu-central-1", "AWS region used during AMI creation") + cmd.Flags().String("aws-bucket", "constellation-images", "S3 bucket used during AMI creation") + return cmd +} + +func runAWS(cmd *cobra.Command, _ []string) error { + workdir := os.Getenv("BUILD_WORKING_DIRECTORY") + if len(workdir) > 0 { + must(os.Chdir(workdir)) + } + + flags, err := parseAWSFlags(cmd) + if err != nil { + return err + } + log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel})) + log.Debug("Parsed flags: %+v", flags) + + archiveC, archiveCClose, err := archive.New(cmd.Context(), flags.region, flags.bucket, flags.distributionID, log) + if err != nil { + return err + } + defer func() { + if err := archiveCClose(cmd.Context()); err != nil { + log.Error("closing archive client: %v", err) + } + }() + + uploadC, err := awsupload.New(flags.awsRegion, flags.awsBucket, log) + if err != nil { + return fmt.Errorf("uploading image: %w", err) + } + + file, err := os.Open(flags.rawImage) + if err != nil { + return fmt.Errorf("uploading image: opening image file %w", err) + } + defer file.Close() + size, err := file.Seek(0, io.SeekEnd) + if err != nil { + return err + } + if _, err := file.Seek(0, io.SeekStart); err != nil { + return err + } + out := cmd.OutOrStdout() + if len(flags.out) > 0 { + outF, err := os.Create(flags.out) + if err != nil { + return fmt.Errorf("uploading image: opening output file %w", err) + } + defer outF.Close() + out = outF + } + + uploadReq := &osimage.UploadRequest{ + Provider: flags.provider, + Version: flags.version, + AttestationVariant: flags.attestationVariant, + SecureBoot: flags.secureBoot, + Size: size, + Timestamp: flags.timestamp, + Image: file, + } + + if flags.secureBoot { + sbDatabase, uefiVarStore, err := loadSecureBootKeys(flags.pki) + if err != nil { + return err + } + uploadReq.SBDatabase = sbDatabase + uploadReq.UEFIVarStore = uefiVarStore + } + + return uploadImage(cmd.Context(), archiveC, uploadC, uploadReq, out) +} diff --git a/image/upload/internal/cmd/azure.go b/image/upload/internal/cmd/azure.go new file mode 100644 index 0000000000..7b29f16d0e --- /dev/null +++ b/image/upload/internal/cmd/azure.go @@ -0,0 +1,107 @@ +/* +Copyright (c) Edgeless Systems GmbH + +SPDX-License-Identifier: AGPL-3.0-only +*/ + +package cmd + +import ( + "fmt" + "io" + "log/slog" + "os" + + "github.com/edgelesssys/constellation/v2/internal/osimage" + "github.com/edgelesssys/constellation/v2/internal/osimage/archive" + azureupload "github.com/edgelesssys/constellation/v2/internal/osimage/azure" + "github.com/spf13/cobra" +) + +// newAzureCmd returns the command that uploads an OS image to Azure. 
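// runAWS above (and runAzure/runGCP below) measures the image size by seeking
// to the end and back. A shared helper would keep the offset handling in one
// place; a sketch (the helper name is illustrative):
//
//    // fileSize returns the size of f in bytes and rewinds it to offset 0.
//    func fileSize(f *os.File) (int64, error) {
//        size, err := f.Seek(0, io.SeekEnd)
//        if err != nil {
//            return 0, err
//        }
//        if _, err := f.Seek(0, io.SeekStart); err != nil {
//            return 0, err
//        }
//        return size, nil
//    }
//
// Calling f.Stat() and reading Size() from the FileInfo would also work and
// avoids moving the read offset at all.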
+func newAzureCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "azure", + Short: "Upload OS image to Azure", + Long: "Upload OS image to Azure.", + Args: cobra.ExactArgs(0), + RunE: runAzure, + } + + cmd.Flags().String("az-subscription", "0d202bbb-4fa7-4af8-8125-58c269a05435", "Azure subscription to use") + cmd.Flags().String("az-location", "northeurope", "Azure location to use") + cmd.Flags().String("az-resource-group", "constellation-images", "Azure resource group to use") + return cmd +} + +func runAzure(cmd *cobra.Command, _ []string) error { + workdir := os.Getenv("BUILD_WORKING_DIRECTORY") + if len(workdir) > 0 { + must(os.Chdir(workdir)) + } + + flags, err := parseAzureFlags(cmd) + if err != nil { + return err + } + log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel})) + log.Debug("Parsed flags: %+v", flags) + + archiveC, archiveCClose, err := archive.New(cmd.Context(), flags.region, flags.bucket, flags.distributionID, log) + if err != nil { + return err + } + defer func() { + if err := archiveCClose(cmd.Context()); err != nil { + log.Error("closing archive client: %v", err) + } + }() + + uploadC, err := azureupload.New(flags.azSubscription, flags.azLocation, flags.azResourceGroup, log) + if err != nil { + return fmt.Errorf("uploading image: %w", err) + } + + file, err := os.Open(flags.rawImage) + if err != nil { + return fmt.Errorf("uploading image: opening image file %w", err) + } + defer file.Close() + size, err := file.Seek(0, io.SeekEnd) + if err != nil { + return err + } + if _, err := file.Seek(0, io.SeekStart); err != nil { + return err + } + out := cmd.OutOrStdout() + if len(flags.out) > 0 { + outF, err := os.Create(flags.out) + if err != nil { + return fmt.Errorf("uploading image: opening output file %w", err) + } + defer outF.Close() + out = outF + } + + uploadReq := &osimage.UploadRequest{ + Provider: flags.provider, + Version: flags.version, + AttestationVariant: flags.attestationVariant, + SecureBoot: flags.secureBoot, + Size: size, + Timestamp: flags.timestamp, + Image: file, + } + + if flags.secureBoot { + sbDatabase, uefiVarStore, err := loadSecureBootKeys(flags.pki) + if err != nil { + return err + } + uploadReq.SBDatabase = sbDatabase + uploadReq.UEFIVarStore = uefiVarStore + } + + return uploadImage(cmd.Context(), archiveC, uploadC, uploadReq, out) +} diff --git a/image/upload/internal/cmd/flags.go b/image/upload/internal/cmd/flags.go index 24bbaf455c..01a3c5202d 100644 --- a/image/upload/internal/cmd/flags.go +++ b/image/upload/internal/cmd/flags.go @@ -8,20 +8,21 @@ package cmd import ( "errors" + "log/slog" + "os" "path/filepath" "strings" "github.com/edgelesssys/constellation/v2/internal/api/versionsapi" "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider" "github.com/spf13/cobra" - "go.uber.org/zap/zapcore" ) type s3Flags struct { region string bucket string distributionID string - logLevel zapcore.Level + logLevel slog.Level } func parseS3Flags(cmd *cobra.Command) (s3Flags, error) { @@ -41,9 +42,9 @@ func parseS3Flags(cmd *cobra.Command) (s3Flags, error) { if err != nil { return s3Flags{}, err } - logLevel := zapcore.InfoLevel + logLevel := slog.LevelInfo if verbose { - logLevel = zapcore.DebugLevel + logLevel = slog.LevelDebug } return s3Flags{ @@ -84,7 +85,7 @@ func parseUploadMeasurementsFlags(cmd *cobra.Command) (measurementsFlags, error) type mergeMeasurementsFlags struct { out string - logLevel zapcore.Level + logLevel slog.Level } func parseMergeMeasurementsFlags(cmd *cobra.Command) 
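// The verbose-to-level mapping below recurs in every parse*Flags function of
// these tools. A shared helper could collapse the repetition (illustrative
// sketch, assuming the cobra "verbose" flag used throughout):
//
//    func logLevelFromFlags(cmd *cobra.Command) (slog.Level, error) {
//        verbose, err := cmd.Flags().GetBool("verbose")
//        if err != nil {
//            return slog.LevelInfo, err
//        }
//        if verbose {
//            return slog.LevelDebug, nil
//        }
//        return slog.LevelInfo, nil
//    }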
(mergeMeasurementsFlags, error) { @@ -96,9 +97,9 @@ func parseMergeMeasurementsFlags(cmd *cobra.Command) (mergeMeasurementsFlags, er if err != nil { return mergeMeasurementsFlags{}, err } - logLevel := zapcore.InfoLevel + logLevel := slog.LevelInfo if verbose { - logLevel = zapcore.DebugLevel + logLevel = slog.LevelDebug } return mergeMeasurementsFlags{ @@ -112,7 +113,7 @@ type envelopeMeasurementsFlags struct { csp cloudprovider.Provider attestationVariant string in, out string - logLevel zapcore.Level + logLevel slog.Level } func parseEnvelopeMeasurementsFlags(cmd *cobra.Command) (envelopeMeasurementsFlags, error) { @@ -148,9 +149,9 @@ func parseEnvelopeMeasurementsFlags(cmd *cobra.Command) (envelopeMeasurementsFla if err != nil { return envelopeMeasurementsFlags{}, err } - logLevel := zapcore.InfoLevel + logLevel := slog.LevelInfo if verbose { - logLevel = zapcore.DebugLevel + logLevel = slog.LevelDebug } return envelopeMeasurementsFlags{ diff --git a/image/upload/internal/cmd/gcp.go b/image/upload/internal/cmd/gcp.go new file mode 100644 index 0000000000..e903788459 --- /dev/null +++ b/image/upload/internal/cmd/gcp.go @@ -0,0 +1,107 @@ +/* +Copyright (c) Edgeless Systems GmbH + +SPDX-License-Identifier: AGPL-3.0-only +*/ + +package cmd + +import ( + "fmt" + "io" + "log/slog" + "os" + + "github.com/edgelesssys/constellation/v2/internal/osimage" + "github.com/edgelesssys/constellation/v2/internal/osimage/archive" + gcpupload "github.com/edgelesssys/constellation/v2/internal/osimage/gcp" + "github.com/spf13/cobra" +) + +// newGCPCommand returns the command that uploads an OS image to GCP. +func newGCPCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "gcp", + Short: "Upload OS image to GCP", + Long: "Upload OS image to GCP.", + Args: cobra.ExactArgs(0), + RunE: runGCP, + } + + cmd.Flags().String("gcp-project", "constellation-images", "GCP project to use") + cmd.Flags().String("gcp-location", "europe-west3", "GCP location to use") + cmd.Flags().String("gcp-bucket", "constellation-os-images", "GCP bucket to use") + return cmd +} + +func runGCP(cmd *cobra.Command, _ []string) error { + workdir := os.Getenv("BUILD_WORKING_DIRECTORY") + if len(workdir) > 0 { + must(os.Chdir(workdir)) + } + + flags, err := parseGCPFlags(cmd) + if err != nil { + return err + } + log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel})) + log.Debug("Parsed flags: %+v", flags) + + archiveC, archiveCClose, err := archive.New(cmd.Context(), flags.region, flags.bucket, flags.distributionID, log) + if err != nil { + return err + } + defer func() { + if err := archiveCClose(cmd.Context()); err != nil { + log.Error("closing archive client: %v", err) + } + }() + + uploadC, err := gcpupload.New(cmd.Context(), flags.gcpProject, flags.gcpLocation, flags.gcpBucket, log) + if err != nil { + return fmt.Errorf("uploading image: %w", err) + } + + file, err := os.Open(flags.rawImage) + if err != nil { + return fmt.Errorf("uploading image: opening image file %w", err) + } + defer file.Close() + size, err := file.Seek(0, io.SeekEnd) + if err != nil { + return err + } + if _, err := file.Seek(0, io.SeekStart); err != nil { + return err + } + out := cmd.OutOrStdout() + if len(flags.out) > 0 { + outF, err := os.Create(flags.out) + if err != nil { + return fmt.Errorf("uploading image: opening output file %w", err) + } + defer outF.Close() + out = outF + } + + uploadReq := &osimage.UploadRequest{ + Provider: flags.provider, + Version: flags.version, + AttestationVariant: 
flags.attestationVariant, + SecureBoot: flags.secureBoot, + Size: size, + Timestamp: flags.timestamp, + Image: file, + } + + if flags.secureBoot { + sbDatabase, uefiVarStore, err := loadSecureBootKeys(flags.pki) + if err != nil { + return err + } + uploadReq.SBDatabase = sbDatabase + uploadReq.UEFIVarStore = uefiVarStore + } + + return uploadImage(cmd.Context(), archiveC, uploadC, uploadReq, out) +} diff --git a/image/upload/internal/cmd/info.go b/image/upload/internal/cmd/info.go index c837c6a03e..c6de5421ac 100644 --- a/image/upload/internal/cmd/info.go +++ b/image/upload/internal/cmd/info.go @@ -9,6 +9,7 @@ package cmd import ( "encoding/json" "fmt" + "log/slog" "os" "github.com/edgelesssys/constellation/v2/internal/api/versionsapi" @@ -49,8 +50,8 @@ func runInfo(cmd *cobra.Command, args []string) error { return err } - log := logger.New(logger.PlainLog, flags.logLevel) - log.Debugf("Parsed flags: %+v", flags) + log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel})) + log.Debug("Parsed flags: %+v", flags) info, err := readInfoArgs(args) if err != nil { return err @@ -62,7 +63,7 @@ func runInfo(cmd *cobra.Command, args []string) error { } defer func() { if err := uploadCClose(cmd.Context()); err != nil { - log.Errorf("closing upload client: %v", err) + log.Error("closing upload client: %v", err) } }() @@ -70,7 +71,7 @@ func runInfo(cmd *cobra.Command, args []string) error { if err != nil { return fmt.Errorf("uploading image info: %w", err) } - log.Infof("Uploaded image info to %s", url) + log.Info("Uploaded image info to %s", url) return nil } diff --git a/image/upload/internal/cmd/measurementsenvelope.go b/image/upload/internal/cmd/measurementsenvelope.go index e7480d1ca4..d31b609737 100644 --- a/image/upload/internal/cmd/measurementsenvelope.go +++ b/image/upload/internal/cmd/measurementsenvelope.go @@ -9,10 +9,10 @@ package cmd import ( "encoding/json" "fmt" + "log/slog" "os" "github.com/edgelesssys/constellation/v2/internal/attestation/measurements" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/spf13/cobra" ) @@ -53,8 +53,8 @@ func runEnvelopeMeasurements(cmd *cobra.Command, _ []string) error { return err } - log := logger.New(logger.PlainLog, flags.logLevel) - log.Debugf("Parsed flags: %+v", flags) + log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel})) + log.Debug("Parsed flags: %+v", flags) f, err := os.Open(flags.in) if err != nil { @@ -97,7 +97,7 @@ func runEnvelopeMeasurements(cmd *cobra.Command, _ []string) error { if err := json.NewEncoder(out).Encode(enveloped); err != nil { return fmt.Errorf("enveloping measurements: writing output file: %w", err) } - log.Infof("Enveloped image measurements") + log.Info("Enveloped image measurements") return nil } diff --git a/image/upload/internal/cmd/measurementsmerge.go b/image/upload/internal/cmd/measurementsmerge.go index 758f54e5d8..86501d95a2 100644 --- a/image/upload/internal/cmd/measurementsmerge.go +++ b/image/upload/internal/cmd/measurementsmerge.go @@ -9,10 +9,10 @@ package cmd import ( "encoding/json" "fmt" + "log/slog" "os" "github.com/edgelesssys/constellation/v2/internal/attestation/measurements" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/spf13/cobra" ) @@ -44,8 +44,8 @@ func runMergeMeasurements(cmd *cobra.Command, args []string) error { return err } - log := logger.New(logger.PlainLog, flags.logLevel) - log.Debugf("Parsed flags: %+v", flags) + log := slog.New(slog.NewTextHandler(os.Stdout, 
&slog.HandlerOptions{Level: flags.logLevel})) + log.Debug("Parsed flags: %+v", flags) mergedMeasurements, err := readMeasurementsArgs(args) if err != nil { @@ -65,7 +65,7 @@ func runMergeMeasurements(cmd *cobra.Command, args []string) error { if err := json.NewEncoder(out).Encode(mergedMeasurements); err != nil { return fmt.Errorf("merging measurements: writing output file: %w", err) } - log.Infof("Merged image measurements") + log.Info("Merged image measurements") return nil } diff --git a/image/upload/internal/cmd/measurementsupload.go b/image/upload/internal/cmd/measurementsupload.go index e266fda3f5..38724f910c 100644 --- a/image/upload/internal/cmd/measurementsupload.go +++ b/image/upload/internal/cmd/measurementsupload.go @@ -8,6 +8,7 @@ package cmd import ( "fmt" + "log/slog" "os" "github.com/edgelesssys/constellation/v2/internal/constants" @@ -52,8 +53,8 @@ func runMeasurementsUpload(cmd *cobra.Command, _ []string) error { return err } - log := logger.New(logger.PlainLog, flags.logLevel) - log.Debugf("Parsed flags: %+v", flags) + log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel})) + log.Debug("Parsed flags: %+v", flags) uploadC, uploadCClose, err := measurementsuploader.New(cmd.Context(), flags.region, flags.bucket, flags.distributionID, log) if err != nil { @@ -61,7 +62,7 @@ func runMeasurementsUpload(cmd *cobra.Command, _ []string) error { } defer func() { if err := uploadCClose(cmd.Context()); err != nil { - log.Errorf("closing upload client: %v", err) + log.Error("closing upload client: %v", err) } }() @@ -80,6 +81,6 @@ func runMeasurementsUpload(cmd *cobra.Command, _ []string) error { if err != nil { return fmt.Errorf("uploading image info: %w", err) } - log.Infof("Uploaded image measurements to %s (and signature to %s)", measurementsURL, signatureURL) + log.Info("Uploaded image measurements to %s (and signature to %s)", measurementsURL, signatureURL) return nil } diff --git a/image/upload/internal/cmd/nop.go b/image/upload/internal/cmd/nop.go new file mode 100644 index 0000000000..9b2cd00519 --- /dev/null +++ b/image/upload/internal/cmd/nop.go @@ -0,0 +1,90 @@ +/* +Copyright (c) Edgeless Systems GmbH + +SPDX-License-Identifier: AGPL-3.0-only +*/ + +package cmd + +import ( + "fmt" + "io" + "log/slog" + "os" + + "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider" + "github.com/edgelesssys/constellation/v2/internal/osimage" + "github.com/edgelesssys/constellation/v2/internal/osimage/archive" + nopupload "github.com/edgelesssys/constellation/v2/internal/osimage/nop" + "github.com/spf13/cobra" +) + +func runNOP(cmd *cobra.Command, provider cloudprovider.Provider, _ []string) error { + workdir := os.Getenv("BUILD_WORKING_DIRECTORY") + if len(workdir) > 0 { + must(os.Chdir(workdir)) + } + + flags, err := parseCommonFlags(cmd) + if err != nil { + return err + } + flags.provider = provider + log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel})) + log.Debug("Parsed flags: %+v", flags) + + archiveC, archiveCClose, err := archive.New(cmd.Context(), flags.region, flags.bucket, flags.distributionID, log) + if err != nil { + return err + } + defer func() { + if err := archiveCClose(cmd.Context()); err != nil { + log.Error("closing archive client: %v", err) + } + }() + + uploadC := nopupload.New(log) + + file, err := os.Open(flags.rawImage) + if err != nil { + return fmt.Errorf("uploading image: opening image file %w", err) + } + defer file.Close() + size, err := file.Seek(0, io.SeekEnd) + if 
err != nil { + return err + } + if _, err := file.Seek(0, io.SeekStart); err != nil { + return err + } + out := cmd.OutOrStdout() + if len(flags.out) > 0 { + outF, err := os.Create(flags.out) + if err != nil { + return fmt.Errorf("uploading image: opening output file %w", err) + } + defer outF.Close() + out = outF + } + + uploadReq := &osimage.UploadRequest{ + Provider: flags.provider, + Version: flags.version, + AttestationVariant: flags.attestationVariant, + SecureBoot: flags.secureBoot, + Size: size, + Timestamp: flags.timestamp, + Image: file, + } + + if flags.secureBoot { + sbDatabase, uefiVarStore, err := loadSecureBootKeys(flags.pki) + if err != nil { + return err + } + uploadReq.SBDatabase = sbDatabase + uploadReq.UEFIVarStore = uefiVarStore + } + + return uploadImage(cmd.Context(), archiveC, uploadC, uploadReq, out) +} diff --git a/internal/api/attestationconfigapi/cli/delete.go b/internal/api/attestationconfigapi/cli/delete.go index faedabd118..f46a27c534 100644 --- a/internal/api/attestationconfigapi/cli/delete.go +++ b/internal/api/attestationconfigapi/cli/delete.go @@ -8,15 +8,15 @@ package main import ( "errors" "fmt" + "log/slog" + "os" "path" "github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi" "github.com/edgelesssys/constellation/v2/internal/attestation/variant" "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/internal/staticupload" "github.com/spf13/cobra" - "go.uber.org/zap" ) // newDeleteCmd creates the delete command. @@ -46,7 +46,7 @@ func newDeleteCmd() *cobra.Command { } func runDelete(cmd *cobra.Command, args []string) (retErr error) { - log := logger.New(logger.PlainLog, zap.DebugLevel).Named("attestationconfigapi") + log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug})).WithGroup("attestationconfigapi") deleteCfg, err := newDeleteConfig(cmd, ([3]string)(args[:3])) if err != nil { @@ -89,7 +89,7 @@ func runRecursiveDelete(cmd *cobra.Command, args []string) (retErr error) { return fmt.Errorf("creating delete config: %w", err) } - log := logger.New(logger.PlainLog, zap.DebugLevel).Named("attestationconfigapi") + log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug})).WithGroup("attestationconfigapi") client, closeFn, err := staticupload.New(cmd.Context(), staticupload.Config{ Bucket: deleteCfg.bucket, Region: deleteCfg.region, diff --git a/internal/api/attestationconfigapi/cli/upload.go b/internal/api/attestationconfigapi/cli/upload.go index 831f99da7d..ef3e339b5f 100644 --- a/internal/api/attestationconfigapi/cli/upload.go +++ b/internal/api/attestationconfigapi/cli/upload.go @@ -9,6 +9,7 @@ import ( "context" "errors" "fmt" + "log/slog" "os" "time" @@ -16,12 +17,10 @@ import ( "github.com/edgelesssys/constellation/v2/internal/attestation/variant" "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider" "github.com/edgelesssys/constellation/v2/internal/file" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/internal/staticupload" "github.com/edgelesssys/constellation/v2/internal/verify" "github.com/spf13/afero" "github.com/spf13/cobra" - "go.uber.org/zap" ) func newUploadCmd() *cobra.Command { @@ -61,7 +60,7 @@ func envCheck(_ *cobra.Command, _ []string) error { func runUpload(cmd *cobra.Command, args []string) (retErr error) { ctx := cmd.Context() - log := 
logger.New(logger.PlainLog, zap.DebugLevel).Named("attestationconfigapi") + log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug})).WithGroup("attestationconfigapi") uploadCfg, err := newConfig(cmd, ([3]string)(args[:3])) if err != nil { @@ -110,25 +109,25 @@ func uploadReport(ctx context.Context, client *attestationconfigapi.Client, cfg uploadConfig, fs file.Handler, - log *logger.Logger, + log *slog.Logger, ) error { if cfg.kind != snpReport { return fmt.Errorf("kind %s not supported", cfg.kind) } - log.Infof("Reading SNP report from file: %s", cfg.path) + log.Info("Reading SNP report from file: %s", cfg.path) var report verify.Report if err := fs.ReadJSON(cfg.path, &report); err != nil { return fmt.Errorf("reading snp report: %w", err) } inputVersion := convertTCBVersionToSNPVersion(report.SNPReport.LaunchTCB) - log.Infof("Input report: %+v", inputVersion) + log.Info("Input report: %+v", inputVersion) latestAPIVersionAPI, err := attestationconfigapi.NewFetcherWithCustomCDNAndCosignKey(cfg.url, cfg.cosignPublicKey).FetchSEVSNPVersionLatest(ctx, attestation) if err != nil { if errors.Is(err, attestationconfigapi.ErrNoVersionsFound) { - log.Infof("No versions found in API, but assuming that we are uploading the first version.") + log.Info("No versions found in API, but assuming that we are uploading the first version.") } else { return fmt.Errorf("fetching latest version: %w", err) } @@ -137,7 +136,7 @@ func uploadReport(ctx context.Context, latestAPIVersion := latestAPIVersionAPI.SEVSNPVersion if err := client.UploadSEVSNPVersionLatest(ctx, attestation, inputVersion, latestAPIVersion, cfg.uploadDate, cfg.force); err != nil { if errors.Is(err, attestationconfigapi.ErrNoNewerVersion) { - log.Infof("Input version: %+v is not newer than latest API version: %+v", inputVersion, latestAPIVersion) + log.Info("Input version: %+v is not newer than latest API version: %+v", inputVersion, latestAPIVersion) return nil } return fmt.Errorf("updating latest version: %w", err) diff --git a/internal/api/attestationconfigapi/client.go b/internal/api/attestationconfigapi/client.go index 9b4575a4c5..583e3bba4d 100644 --- a/internal/api/attestationconfigapi/client.go +++ b/internal/api/attestationconfigapi/client.go @@ -9,11 +9,11 @@ import ( "context" "errors" "fmt" + "log/slog" "time" apiclient "github.com/edgelesssys/constellation/v2/internal/api/client" "github.com/edgelesssys/constellation/v2/internal/attestation/variant" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/internal/sigstore" "github.com/edgelesssys/constellation/v2/internal/staticupload" @@ -32,7 +32,7 @@ type Client struct { } // NewClient returns a new Client. 
-func NewClient(ctx context.Context, cfg staticupload.Config, cosignPwd, privateKey []byte, dryRun bool, versionWindowSize int, log *logger.Logger) (*Client, apiclient.CloseFunc, error) { +func NewClient(ctx context.Context, cfg staticupload.Config, cosignPwd, privateKey []byte, dryRun bool, versionWindowSize int, log *slog.Logger) (*Client, apiclient.CloseFunc, error) { s3Client, clientClose, err := apiclient.NewClient(ctx, cfg.Region, cfg.Bucket, cfg.DistributionID, dryRun, log) if err != nil { return nil, nil, fmt.Errorf("failed to create s3 storage: %w", err) diff --git a/internal/api/attestationconfigapi/reporter.go b/internal/api/attestationconfigapi/reporter.go index 4cc4bcad68..00656e8816 100644 --- a/internal/api/attestationconfigapi/reporter.go +++ b/internal/api/attestationconfigapi/reporter.go @@ -55,23 +55,23 @@ func (c Client) UploadSEVSNPVersionLatest(ctx context.Context, attestation varia return fmt.Errorf("list reported versions: %w", err) } if len(versionDates) < c.cacheWindowSize { - c.s3Client.Logger.Warnf("Skipping version update, found %d, expected %d reported versions.", len(versionDates), c.cacheWindowSize) + c.s3Client.Logger.Warn(fmt.Sprintf("Skipping version update, found %d, expected %d reported versions.", len(versionDates), c.cacheWindowSize)) return nil } minVersion, minDate, err := c.findMinVersion(ctx, attestation, versionDates) if err != nil { return fmt.Errorf("get minimal version: %w", err) } - c.s3Client.Logger.Infof("Found minimal version: %+v with date: %s", minVersion, minDate) + c.s3Client.Logger.Info(fmt.Sprintf("Found minimal version: %+v with date: %s", minVersion, minDate)) shouldUpdateAPI, err := isInputNewerThanOtherVersion(minVersion, latestAPIVersion) if err != nil { return ErrNoNewerVersion } if !shouldUpdateAPI { - c.s3Client.Logger.Infof("Input version: %+v is not newer than latest API version: %+v", minVersion, latestAPIVersion) + c.s3Client.Logger.Info(fmt.Sprintf("Input version: %+v is not newer than latest API version: %+v", minVersion, latestAPIVersion)) return nil } - c.s3Client.Logger.Infof("Input version: %+v is newer than latest API version: %+v", minVersion, latestAPIVersion) + c.s3Client.Logger.Info(fmt.Sprintf("Input version: %+v is newer than latest API version: %+v", minVersion, latestAPIVersion)) t, err := time.Parse(VersionFormat, minDate) if err != nil { return fmt.Errorf("parsing date: %w", err) @@ -79,7 +79,7 @@ func (c Client) UploadSEVSNPVersionLatest(ctx context.Context, attestation varia if err := c.uploadSEVSNPVersion(ctx, attestation, minVersion, t); err != nil { return fmt.Errorf("uploading version: %w", err) } - c.s3Client.Logger.Infof("Successfully uploaded new Azure SEV-SNP version: %+v", minVersion) + c.s3Client.Logger.Info(fmt.Sprintf("Successfully uploaded new Azure SEV-SNP version: %+v", minVersion)) return nil } diff --git a/internal/api/client/client.go b/internal/api/client/client.go index 1a26945119..0f85bd927a 100644 --- a/internal/api/client/client.go +++ b/internal/api/client/client.go @@ -33,16 +33,15 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "strings" "time" s3manager "github.com/aws/aws-sdk-go-v2/feature/s3/manager" "github.com/aws/aws-sdk-go-v2/service/s3" s3types "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/internal/sigstore" "github.com/edgelesssys/constellation/v2/internal/staticupload" - "go.uber.org/zap" ) // Client is the a general client for all APIs. 
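The reporter.go hunks above show the safe migration pattern: printf-style messages are pre-formatted with fmt.Sprintf before being handed to slog. Many other call sites in this series still pass format verbs straight to slog.Debug/Info/Error, which slog does not expand; it parses the trailing arguments as alternating key/value attributes instead. A minimal sketch of the difference (illustrative only, not part of the patch):

    package main

    import (
    	"errors"
    	"fmt"
    	"log/slog"
    	"os"
    )

    func main() {
    	log := slog.New(slog.NewTextHandler(os.Stdout, nil))
    	err := errors.New("connection reset")

    	// Unexpanded verb: slog prints msg="closing client: %v" and, since the
    	// trailing error carries no key, a dangling !BADKEY=connection reset attribute.
    	log.Error("closing client: %v", err)

    	// Pattern used in reporter.go above: pre-format the whole message.
    	log.Error(fmt.Sprintf("closing client: %v", err))

    	// Idiomatic slog: constant message plus a structured attribute.
    	log.Error("closing client", slog.Any("error", err))
    }
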
@@ -54,13 +53,13 @@ type Client struct { dirtyPaths []string // written paths to be invalidated DryRun bool // no write operations are performed - Logger *logger.Logger + Logger *slog.Logger } // NewReadOnlyClient creates a new read-only client. // This client can be used to fetch objects but cannot write updates. func NewReadOnlyClient(ctx context.Context, region, bucket, distributionID string, - log *logger.Logger, + log *slog.Logger, ) (*Client, CloseFunc, error) { staticUploadClient, staticUploadClientClose, err := staticupload.New(ctx, staticupload.Config{ Region: region, @@ -89,7 +88,7 @@ func NewReadOnlyClient(ctx context.Context, region, bucket, distributionID strin // NewClient creates a new client for the versions API. func NewClient(ctx context.Context, region, bucket, distributionID string, dryRun bool, - log *logger.Logger, + log *slog.Logger, ) (*Client, CloseFunc, error) { staticUploadClient, staticUploadClientClose, err := staticupload.New(ctx, staticupload.Config{ Region: region, @@ -120,7 +119,7 @@ func NewClient(ctx context.Context, region, bucket, distributionID string, dryRu // It invalidates the CDN cache for all uploaded files. func (c *Client) Close(ctx context.Context) error { if c.s3ClientClose == nil { - c.Logger.Debugf("Client has no s3ClientClose") + c.Logger.Debug("Client has no s3ClientClose") return nil } return c.s3ClientClose(ctx) @@ -132,7 +131,7 @@ func (c *Client) DeletePath(ctx context.Context, path string) error { Bucket: &c.bucket, Prefix: &path, } - c.Logger.Debugf("Listing objects in %s", path) + c.Logger.Debug("Listing objects in %s", path) objs := []s3types.Object{} out := &s3.ListObjectsV2Output{IsTruncated: ptr(true)} for out.IsTruncated != nil && *out.IsTruncated { @@ -143,10 +142,10 @@ func (c *Client) DeletePath(ctx context.Context, path string) error { } objs = append(objs, out.Contents...) 
} - c.Logger.Debugf("Found %d objects in %s", len(objs), path) + c.Logger.Debug("Found %d objects in %s", len(objs), path) if len(objs) == 0 { - c.Logger.Warnf("Path %s is already empty", path) + c.Logger.Warn("Path %s is already empty", path) return nil } @@ -156,7 +155,7 @@ func (c *Client) DeletePath(ctx context.Context, path string) error { } if c.DryRun { - c.Logger.Debugf("DryRun: Deleting %d objects with IDs %v", len(objs), objIDs) + c.Logger.Debug("DryRun: Deleting %d objects with IDs %v", len(objs), objIDs) return nil } @@ -168,7 +167,7 @@ func (c *Client) DeletePath(ctx context.Context, path string) error { Objects: objIDs, }, } - c.Logger.Debugf("Deleting %d objects in %s", len(objs), path) + c.Logger.Debug("Deleting %d objects in %s", len(objs), path) if _, err := c.s3Client.DeleteObjects(ctx, deleteIn); err != nil { return fmt.Errorf("deleting objects in %s: %w", path, err) } @@ -198,7 +197,7 @@ func Fetch[T APIObject](ctx context.Context, c *Client, obj T) (T, error) { Key: ptr(obj.JSONPath()), } - c.Logger.Debugf("Fetching %T from s3: %s", obj, obj.JSONPath()) + c.Logger.Debug("Fetching %T from s3: %s", obj, obj.JSONPath()) out, err := c.s3Client.GetObject(ctx, in) var noSuchkey *s3types.NoSuchKey if errors.As(err, &noSuchkey) { @@ -232,7 +231,7 @@ func Update(ctx context.Context, c *Client, obj APIObject) error { } if c.DryRun { - c.Logger.With(zap.String("bucket", c.bucket), zap.String("key", obj.JSONPath()), zap.String("body", string(rawJSON))).Debugf("DryRun: s3 put object") + c.Logger.With(slog.String("bucket", c.bucket), slog.String("key", obj.JSONPath()), slog.String("body", string(rawJSON))).Debug("DryRun: s3 put object") return nil } @@ -244,7 +243,7 @@ func Update(ctx context.Context, c *Client, obj APIObject) error { c.dirtyPaths = append(c.dirtyPaths, "/"+obj.JSONPath()) - c.Logger.Debugf("Uploading %T to s3: %v", obj, obj.JSONPath()) + c.Logger.Debug("Uploading %T to s3: %v", obj, obj.JSONPath()) if _, err := c.Upload(ctx, in); err != nil { return fmt.Errorf("uploading %T: %w", obj, err) } @@ -307,7 +306,7 @@ func Delete(ctx context.Context, c *Client, obj APIObject) error { Key: ptr(obj.JSONPath()), } - c.Logger.Debugf("Deleting %T from s3: %s", obj, obj.JSONPath()) + c.Logger.Debug("Deleting %T from s3: %s", obj, obj.JSONPath()) if _, err := c.DeleteObject(ctx, in); err != nil { return fmt.Errorf("deleting s3 object at %s: %w", obj.JSONPath(), err) } diff --git a/internal/api/versionsapi/cli/add.go b/internal/api/versionsapi/cli/add.go index 4efd94612c..e48d66a969 100644 --- a/internal/api/versionsapi/cli/add.go +++ b/internal/api/versionsapi/cli/add.go @@ -10,12 +10,12 @@ import ( "context" "errors" "fmt" + "log/slog" + "os" apiclient "github.com/edgelesssys/constellation/v2/internal/api/client" "github.com/edgelesssys/constellation/v2/internal/api/versionsapi" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/spf13/cobra" - "go.uber.org/zap/zapcore" "golang.org/x/mod/semver" ) @@ -52,21 +52,21 @@ func runAdd(cmd *cobra.Command, _ []string) (retErr error) { if err != nil { return err } - log := logger.New(logger.PlainLog, flags.logLevel) - log.Debugf("Parsed flags: %+v", flags) + log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel})) + log.Debug("Parsed flags: %+v", flags) - log.Debugf("Validating flags") + log.Debug("Validating flags") if err := flags.validate(log); err != nil { return err } - log.Debugf("Creating version struct") + log.Debug("Creating version struct") ver, err := 
versionsapi.NewVersion(flags.ref, flags.stream, flags.version, flags.kind) if err != nil { return fmt.Errorf("creating version: %w", err) } - log.Debugf("Creating versions API client") + log.Debug("Creating versions API client") client, clientClose, err := versionsapi.NewClient(cmd.Context(), flags.region, flags.bucket, flags.distributionID, flags.dryRun, log) if err != nil { return fmt.Errorf("creating client: %w", err) @@ -78,7 +78,7 @@ func runAdd(cmd *cobra.Command, _ []string) (retErr error) { } }() - log.Infof("Adding version") + log.Info("Adding version") if err := ensureVersion(cmd.Context(), client, flags.kind, ver, versionsapi.GranularityMajor, log); err != nil { return err } @@ -93,14 +93,14 @@ func runAdd(cmd *cobra.Command, _ []string) (retErr error) { } } - log.Infof("List major->minor URL: %s", ver.ListURL(versionsapi.GranularityMajor)) - log.Infof("List minor->patch URL: %s", ver.ListURL(versionsapi.GranularityMinor)) + log.Info("List major->minor URL: %s", ver.ListURL(versionsapi.GranularityMajor)) + log.Info("List minor->patch URL: %s", ver.ListURL(versionsapi.GranularityMinor)) return nil } func ensureVersion(ctx context.Context, client *versionsapi.Client, kind versionsapi.VersionKind, ver versionsapi.Version, gran versionsapi.Granularity, - log *logger.Logger, + log *slog.Logger, ) error { verListReq := versionsapi.List{ Ref: ver.Ref(), @@ -112,34 +112,34 @@ func ensureVersion(ctx context.Context, client *versionsapi.Client, kind version verList, err := client.FetchVersionList(ctx, verListReq) var notFoundErr *apiclient.NotFoundError if errors.As(err, ¬FoundErr) { - log.Infof("Version list for %s versions under %q does not exist. Creating new list", gran.String(), ver.Major()) + log.Info("Version list for %s versions under %q does not exist. 
Creating new list", gran.String(), ver.Major()) verList = verListReq } else if err != nil { return fmt.Errorf("failed to list minor versions: %w", err) } - log.Debugf("%s version list: %v", gran.String(), verList) + log.Debug("%s version list: %v", gran.String(), verList) insertGran := gran + 1 insertVersion := ver.WithGranularity(insertGran) if verList.Contains(insertVersion) { - log.Infof("Version %q already exists in list %v", insertVersion, verList.Versions) + log.Info("Version %q already exists in list %v", insertVersion, verList.Versions) return nil } - log.Infof("Inserting %s version %q into list", insertGran.String(), insertVersion) + log.Info("Inserting %s version %q into list", insertGran.String(), insertVersion) verList.Versions = append(verList.Versions, insertVersion) - log.Debugf("New %s version list: %v", gran.String(), verList) + log.Debug("New %s version list: %v", gran.String(), verList) if err := client.UpdateVersionList(ctx, verList); err != nil { return fmt.Errorf("failed to add %s version: %w", gran.String(), err) } - log.Infof("Added %q to list", insertVersion) + log.Info("Added %q to list", insertVersion) return nil } -func updateLatest(ctx context.Context, client *versionsapi.Client, kind versionsapi.VersionKind, ver versionsapi.Version, log *logger.Logger) error { +func updateLatest(ctx context.Context, client *versionsapi.Client, kind versionsapi.VersionKind, ver versionsapi.Version, log *slog.Logger) error { latest := versionsapi.Latest{ Ref: ver.Ref(), Stream: ver.Stream(), @@ -148,17 +148,17 @@ func updateLatest(ctx context.Context, client *versionsapi.Client, kind versions latest, err := client.FetchVersionLatest(ctx, latest) var notFoundErr *apiclient.NotFoundError if errors.As(err, ¬FoundErr) { - log.Debugf("Latest version for ref %q and stream %q not found", ver.Ref(), ver.Stream()) + log.Debug("Latest version for ref %q and stream %q not found", ver.Ref(), ver.Stream()) } else if err != nil { return fmt.Errorf("fetching latest version: %w", err) } if latest.Version == ver.Version() { - log.Infof("Version %q is already latest version", ver) + log.Info("Version %q is already latest version", ver) return nil } - log.Infof("Setting %q as latest version", ver) + log.Info("Setting %q as latest version", ver) latest = versionsapi.Latest{ Ref: ver.Ref(), Stream: ver.Stream(), @@ -183,10 +183,10 @@ type addFlags struct { bucket string distributionID string kind versionsapi.VersionKind - logLevel zapcore.Level + logLevel slog.Level } -func (f *addFlags) validate(log *logger.Logger) error { +func (f *addFlags) validate(log *slog.Logger) error { if !semver.IsValid(f.version) { return fmt.Errorf("version %q is not a valid semantic version", f.version) } @@ -203,10 +203,10 @@ func (f *addFlags) validate(log *logger.Logger) error { } if f.release { - log.Debugf("Setting ref to %q, as release flag is set", versionsapi.ReleaseRef) + log.Debug("Setting ref to %q, as release flag is set", versionsapi.ReleaseRef) f.ref = versionsapi.ReleaseRef } else { - log.Debugf("Setting latest to true, as release flag is not set") + log.Debug("Setting latest to true, as release flag is not set") f.latest = true // always set latest for non-release versions } @@ -256,9 +256,9 @@ func parseAddFlags(cmd *cobra.Command) (addFlags, error) { if err != nil { return addFlags{}, err } - logLevel := zapcore.InfoLevel + logLevel := slog.LevelInfo if verbose { - logLevel = zapcore.DebugLevel + logLevel = slog.LevelDebug } region, err := cmd.Flags().GetString("region") if err != nil { diff --git 
a/internal/api/versionsapi/cli/latest.go b/internal/api/versionsapi/cli/latest.go index 0a77406970..33f7186c27 100644 --- a/internal/api/versionsapi/cli/latest.go +++ b/internal/api/versionsapi/cli/latest.go @@ -10,11 +10,11 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" + "os" "github.com/edgelesssys/constellation/v2/internal/api/versionsapi" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/spf13/cobra" - "go.uber.org/zap/zapcore" ) func newLatestCmd() *cobra.Command { @@ -38,15 +38,15 @@ func runLatest(cmd *cobra.Command, _ []string) (retErr error) { if err != nil { return err } - log := logger.New(logger.PlainLog, flags.logLevel) - log.Debugf("Parsed flags: %+v", flags) + log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel})) + log.Debug("Parsed flags: %+v", flags) - log.Debugf("Validating flags") + log.Debug("Validating flags") if err := flags.validate(); err != nil { return err } - log.Debugf("Creating versions API client") + log.Debug("Creating versions API client") client, clientClose, err := versionsapi.NewReadOnlyClient(cmd.Context(), flags.region, flags.bucket, flags.distributionID, log) if err != nil { return fmt.Errorf("creating client: %w", err) @@ -58,7 +58,7 @@ func runLatest(cmd *cobra.Command, _ []string) (retErr error) { } }() - log.Debugf("Requesting latest version") + log.Debug("Requesting latest version") latest := versionsapi.Latest{ Ref: flags.ref, Stream: flags.stream, @@ -89,7 +89,7 @@ type latestFlags struct { region string bucket string distributionID string - logLevel zapcore.Level + logLevel slog.Level } func (l *latestFlags) validate() error { @@ -133,9 +133,9 @@ func parseLatestFlags(cmd *cobra.Command) (latestFlags, error) { if err != nil { return latestFlags{}, err } - logLevel := zapcore.InfoLevel + logLevel := slog.LevelInfo if verbose { - logLevel = zapcore.DebugLevel + logLevel = slog.LevelDebug } return latestFlags{ diff --git a/internal/api/versionsapi/cli/list.go b/internal/api/versionsapi/cli/list.go index f158d6d3c8..776a0a9dd7 100644 --- a/internal/api/versionsapi/cli/list.go +++ b/internal/api/versionsapi/cli/list.go @@ -11,14 +11,14 @@ import ( "encoding/json" "errors" "fmt" + "os" + "log/slog" "github.com/spf13/cobra" - "go.uber.org/zap/zapcore" "golang.org/x/mod/semver" apiclient "github.com/edgelesssys/constellation/v2/internal/api/client" "github.com/edgelesssys/constellation/v2/internal/api/versionsapi" - "github.com/edgelesssys/constellation/v2/internal/logger" ) func newListCmd() *cobra.Command { @@ -43,15 +43,15 @@ func runList(cmd *cobra.Command, _ []string) (retErr error) { if err != nil { return err } - log := logger.New(logger.PlainLog, flags.logLevel) - log.Debugf("Parsed flags: %+v", flags) + log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel})) + log.Debug("Parsed flags: %+v", flags) - log.Debugf("Validating flags") + log.Debug("Validating flags") if err := flags.validate(); err != nil { return err } - log.Debugf("Creating versions API client") + log.Debug("Creating versions API client") client, clientClose, err := versionsapi.NewReadOnlyClient(cmd.Context(), flags.region, flags.bucket, flags.distributionID, log) if err != nil { return fmt.Errorf("creating client: %w", err) @@ -67,29 +67,29 @@ func runList(cmd *cobra.Command, _ []string) (retErr error) { if flags.minorVersion != "" { minorVersions = []string{flags.minorVersion} } else { - log.Debugf("Getting minor versions") + log.Debug("Getting minor versions") 
minorVersions, err = listMinorVersions(cmd.Context(), client, flags.ref, flags.stream) var errNotFound *apiclient.NotFoundError if err != nil && errors.As(err, &errNotFound) { - log.Infof("No minor versions found for ref %q and stream %q.", flags.ref, flags.stream) + log.Info("No minor versions found for ref %q and stream %q.", flags.ref, flags.stream) return nil } else if err != nil { return err } } - log.Debugf("Getting patch versions") + log.Debug("Getting patch versions") patchVersions, err := listPatchVersions(cmd.Context(), client, flags.ref, flags.stream, minorVersions) var errNotFound *apiclient.NotFoundError if err != nil && errors.As(err, &errNotFound) { - log.Infof("No patch versions found for ref %q, stream %q and minor versions %v.", flags.ref, flags.stream, minorVersions) + log.Info("No patch versions found for ref %q, stream %q and minor versions %v.", flags.ref, flags.stream, minorVersions) return nil } else if err != nil { return err } if flags.json { - log.Debugf("Printing versions as JSON") + log.Debug("Printing versions as JSON") var vers []string for _, v := range patchVersions { vers = append(vers, v.Version()) @@ -102,7 +102,7 @@ func runList(cmd *cobra.Command, _ []string) (retErr error) { return nil } - log.Debugf("Printing versions") + log.Debug("Printing versions") for _, v := range patchVersions { fmt.Println(v.ShortPath()) } @@ -158,7 +158,7 @@ type listFlags struct { bucket string distributionID string json bool - logLevel zapcore.Level + logLevel slog.Level } func (l *listFlags) validate() error { @@ -211,9 +211,9 @@ func parseListFlags(cmd *cobra.Command) (listFlags, error) { if err != nil { return listFlags{}, err } - logLevel := zapcore.InfoLevel + logLevel := slog.LevelInfo if verbose { - logLevel = zapcore.DebugLevel + logLevel = slog.LevelDebug } return listFlags{ diff --git a/internal/api/versionsapi/cli/rm.go b/internal/api/versionsapi/cli/rm.go index f41d7510c3..761d9b1d0a 100644 --- a/internal/api/versionsapi/cli/rm.go +++ b/internal/api/versionsapi/cli/rm.go @@ -12,6 +12,8 @@ import ( "fmt" "io" "log" + "log/slog" + "os" "regexp" "strings" "time" @@ -26,10 +28,8 @@ import ( "github.com/aws/smithy-go" apiclient "github.com/edgelesssys/constellation/v2/internal/api/client" "github.com/edgelesssys/constellation/v2/internal/api/versionsapi" - "github.com/edgelesssys/constellation/v2/internal/logger" gaxv2 "github.com/googleapis/gax-go/v2" "github.com/spf13/cobra" - "go.uber.org/zap/zapcore" ) func newRemoveCmd() *cobra.Command { @@ -74,33 +74,33 @@ func runRemove(cmd *cobra.Command, _ []string) (retErr error) { if err != nil { return err } - log := logger.New(logger.PlainLog, flags.logLevel) - log.Debugf("Parsed flags: %+v", flags) + log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: flags.logLevel})) + log.Debug("Parsed flags: %+v", flags) - log.Debugf("Validating flags") + log.Debug("Validating flags") if err := flags.validate(); err != nil { return err } - log.Debugf("Creating GCP client") + log.Debug("Creating GCP client") gcpClient, err := newGCPClient(cmd.Context(), flags.gcpProject) if err != nil { return fmt.Errorf("creating GCP client: %w", err) } - log.Debugf("Creating AWS client") + log.Debug("Creating AWS client") awsClient, err := newAWSClient() if err != nil { return fmt.Errorf("creating AWS client: %w", err) } - log.Debugf("Creating Azure client") + log.Debug("Creating Azure client") azClient, err := newAzureClient(flags.azSubscription, flags.azLocation, flags.azResourceGroup) if err != nil { return 
fmt.Errorf("creating Azure client: %w", err) } - log.Debugf("Creating versions API client") + log.Debug("Creating versions API client") verclient, verclientClose, err := versionsapi.NewClient(cmd.Context(), flags.region, flags.bucket, flags.distributionID, flags.dryrun, log) if err != nil { return fmt.Errorf("creating client: %w", err) @@ -120,14 +120,14 @@ func runRemove(cmd *cobra.Command, _ []string) (retErr error) { } if flags.all { - log.Infof("Deleting ref %s", flags.ref) + log.Info("Deleting ref %s", flags.ref) if err := deleteRef(cmd.Context(), imageClients, flags.ref, flags.dryrun, log); err != nil { return fmt.Errorf("deleting ref: %w", err) } return nil } - log.Infof("Deleting single version %s", flags.ver.ShortPath()) + log.Info("Deleting single version %s", flags.ver.ShortPath()) if err := deleteSingleVersion(cmd.Context(), imageClients, flags.ver, flags.dryrun, log); err != nil { return fmt.Errorf("deleting single version: %w", err) } @@ -135,15 +135,15 @@ func runRemove(cmd *cobra.Command, _ []string) (retErr error) { return nil } -func deleteSingleVersion(ctx context.Context, clients rmImageClients, ver versionsapi.Version, dryrun bool, log *logger.Logger) error { +func deleteSingleVersion(ctx context.Context, clients rmImageClients, ver versionsapi.Version, dryrun bool, log *slog.Logger) error { var retErr error - log.Debugf("Deleting images for %s", ver.Version) + log.Debug("Deleting images for %s", ver.Version) if err := deleteImage(ctx, clients, ver, dryrun, log); err != nil { retErr = errors.Join(retErr, fmt.Errorf("deleting images: %w", err)) } - log.Debugf("Deleting version %s from versions API", ver.Version) + log.Debug("Deleting version %s from versions API", ver.Version) if err := clients.version.DeleteVersion(ctx, ver); err != nil { retErr = errors.Join(retErr, fmt.Errorf("deleting version from versions API: %w", err)) } @@ -151,15 +151,15 @@ func deleteSingleVersion(ctx context.Context, clients rmImageClients, ver versio return retErr } -func deleteRef(ctx context.Context, clients rmImageClients, ref string, dryrun bool, log *logger.Logger) error { +func deleteRef(ctx context.Context, clients rmImageClients, ref string, dryrun bool, log *slog.Logger) error { var vers []versionsapi.Version for _, stream := range []string{"nightly", "console", "debug"} { - log.Infof("Listing versions of stream %s", stream) + log.Info("Listing versions of stream %s", stream) minorVersions, err := listMinorVersions(ctx, clients.version, ref, stream) var notFoundErr *apiclient.NotFoundError if errors.As(err, ¬FoundErr) { - log.Debugf("No minor versions found for stream %s", stream) + log.Debug("No minor versions found for stream %s", stream) continue } else if err != nil { return fmt.Errorf("listing minor versions for stream %s: %w", stream, err) @@ -167,7 +167,7 @@ func deleteRef(ctx context.Context, clients rmImageClients, ref string, dryrun b patchVersions, err := listPatchVersions(ctx, clients.version, ref, stream, minorVersions) if errors.As(err, ¬FoundErr) { - log.Debugf("No patch versions found for stream %s", stream) + log.Debug("No patch versions found for stream %s", stream) continue } else if err != nil { return fmt.Errorf("listing patch versions for stream %s: %w", stream, err) @@ -175,7 +175,7 @@ func deleteRef(ctx context.Context, clients rmImageClients, ref string, dryrun b vers = append(vers, patchVersions...) 
} - log.Infof("Found %d versions to delete", len(vers)) + log.Info("Found %d versions to delete", len(vers)) var retErr error @@ -185,7 +185,7 @@ func deleteRef(ctx context.Context, clients rmImageClients, ref string, dryrun b } } - log.Infof("Deleting ref %s from versions API", ref) + log.Info("Deleting ref %s from versions API", ref) if err := clients.version.DeleteRef(ctx, ref); err != nil { retErr = errors.Join(retErr, fmt.Errorf("deleting ref from versions API: %w", err)) } @@ -193,7 +193,7 @@ func deleteRef(ctx context.Context, clients rmImageClients, ref string, dryrun b return retErr } -func deleteImage(ctx context.Context, clients rmImageClients, ver versionsapi.Version, dryrun bool, log *logger.Logger) error { +func deleteImage(ctx context.Context, clients rmImageClients, ver versionsapi.Version, dryrun bool, log *slog.Logger) error { var retErr error imageInfo := versionsapi.ImageInfo{ @@ -204,8 +204,8 @@ func deleteImage(ctx context.Context, clients rmImageClients, ver versionsapi.Ve imageInfo, err := clients.version.FetchImageInfo(ctx, imageInfo) var notFound *apiclient.NotFoundError if errors.As(err, ¬Found) { - log.Warnf("Image info for %s not found", ver.Version) - log.Warnf("Skipping image deletion") + log.Warn("Image info for %s not found", ver.Version) + log.Warn("Skipping image deletion") return nil } else if err != nil { return fmt.Errorf("fetching image info: %w", err) @@ -214,17 +214,17 @@ func deleteImage(ctx context.Context, clients rmImageClients, ver versionsapi.Ve for _, entry := range imageInfo.List { switch entry.CSP { case "aws": - log.Infof("Deleting AWS images from %s", imageInfo.JSONPath()) + log.Info("Deleting AWS images from %s", imageInfo.JSONPath()) if err := clients.aws.deleteImage(ctx, entry.Reference, entry.Region, dryrun, log); err != nil { retErr = errors.Join(retErr, fmt.Errorf("deleting AWS image %s: %w", entry.Reference, err)) } case "gcp": - log.Infof("Deleting GCP images from %s", imageInfo.JSONPath()) + log.Info("Deleting GCP images from %s", imageInfo.JSONPath()) if err := clients.gcp.deleteImage(ctx, entry.Reference, dryrun, log); err != nil { retErr = errors.Join(retErr, fmt.Errorf("deleting GCP image %s: %w", entry.Reference, err)) } case "azure": - log.Infof("Deleting Azure images from %s", imageInfo.JSONPath()) + log.Info("Deleting Azure images from %s", imageInfo.JSONPath()) if err := clients.az.deleteImage(ctx, entry.Reference, dryrun, log); err != nil { retErr = errors.Join(retErr, fmt.Errorf("deleting Azure image %s: %w", entry.Reference, err)) } @@ -259,7 +259,7 @@ type rmFlags struct { azSubscription string azLocation string azResourceGroup string - logLevel zapcore.Level + logLevel slog.Level ver versionsapi.Version } @@ -358,9 +358,9 @@ func parseRmFlags(cmd *cobra.Command) (*rmFlags, error) { if err != nil { return nil, err } - logLevel := zapcore.InfoLevel + logLevel := slog.LevelInfo if verbose { - logLevel = zapcore.DebugLevel + logLevel = slog.LevelDebug } return &rmFlags{ @@ -400,17 +400,17 @@ type ec2API interface { ) (*ec2.DeleteSnapshotOutput, error) } -func (a *awsClient) deleteImage(ctx context.Context, ami string, region string, dryrun bool, log *logger.Logger) error { +func (a *awsClient) deleteImage(ctx context.Context, ami string, region string, dryrun bool, log *slog.Logger) error { cfg, err := awsconfig.LoadDefaultConfig(ctx, awsconfig.WithRegion(region)) if err != nil { return err } a.ec2 = ec2.NewFromConfig(cfg) - log.Debugf("Deleting resources in AWS region %s", region) + log.Debug("Deleting resources in AWS 
region %s", region) snapshotID, err := a.getSnapshotID(ctx, ami, log) if err != nil { - log.Warnf("Failed to get AWS snapshot ID for image %s: %v", ami, err) + log.Warn("Failed to get AWS snapshot ID for image %s: %v", ami, err) } if err := a.deregisterImage(ctx, ami, dryrun, log); err != nil { @@ -426,8 +426,8 @@ func (a *awsClient) deleteImage(ctx context.Context, ami string, region string, return nil } -func (a *awsClient) deregisterImage(ctx context.Context, ami string, dryrun bool, log *logger.Logger) error { - log.Debugf("Deregistering image %s", ami) +func (a *awsClient) deregisterImage(ctx context.Context, ami string, dryrun bool, log *slog.Logger) error { + log.Debug("Deregistering image %s", ami) deregisterReq := ec2.DeregisterImageInput{ ImageId: &ami, @@ -438,15 +438,15 @@ func (a *awsClient) deregisterImage(ctx context.Context, ami string, dryrun bool if errors.As(err, &apiErr) && (apiErr.ErrorCode() == "InvalidAMIID.NotFound" || apiErr.ErrorCode() == "InvalidAMIID.Unavailable") { - log.Warnf("AWS image %s not found", ami) + log.Warn("AWS image %s not found", ami) return nil } return err } -func (a *awsClient) getSnapshotID(ctx context.Context, ami string, log *logger.Logger) (string, error) { - log.Debugf("Describing image %s", ami) +func (a *awsClient) getSnapshotID(ctx context.Context, ami string, log *slog.Logger) (string, error) { + log.Debug("Describing image %s", ami) req := ec2.DescribeImagesInput{ ImageIds: []string{ami}, @@ -481,8 +481,8 @@ func (a *awsClient) getSnapshotID(ctx context.Context, ami string, log *logger.L return snapshotID, nil } -func (a *awsClient) deleteSnapshot(ctx context.Context, snapshotID string, dryrun bool, log *logger.Logger) error { - log.Debugf("Deleting AWS snapshot %s", snapshotID) +func (a *awsClient) deleteSnapshot(ctx context.Context, snapshotID string, dryrun bool, log *slog.Logger) error { + log.Debug("Deleting AWS snapshot %s", snapshotID) req := ec2.DeleteSnapshotInput{ SnapshotId: &snapshotID, @@ -493,7 +493,7 @@ func (a *awsClient) deleteSnapshot(ctx context.Context, snapshotID string, dryru if errors.As(err, &apiErr) && (apiErr.ErrorCode() == "InvalidSnapshot.NotFound" || apiErr.ErrorCode() == "InvalidSnapshot.Unavailable") { - log.Warnf("AWS snapshot %s not found", snapshotID) + log.Warn("AWS snapshot %s not found", snapshotID) return nil } @@ -523,7 +523,7 @@ type gcpComputeAPI interface { io.Closer } -func (g *gcpClient) deleteImage(ctx context.Context, imageURI string, dryrun bool, log *logger.Logger) error { +func (g *gcpClient) deleteImage(ctx context.Context, imageURI string, dryrun bool, log *slog.Logger) error { // Extract image name from image URI // Expected input into function: "projects/constellation-images/global/images/v2-6-0-stable" // Required for computepb.DeleteImageRequest: "v2-6-0-stable" @@ -536,20 +536,20 @@ func (g *gcpClient) deleteImage(ctx context.Context, imageURI string, dryrun boo } if dryrun { - log.Debugf("DryRun: delete image request: %v", req) + log.Debug("DryRun: delete image request: %v", req) return nil } - log.Debugf("Deleting image %s", image) + log.Debug("Deleting image %s", image) op, err := g.compute.Delete(ctx, req) if err != nil && strings.Contains(err.Error(), "404") { - log.Warnf("GCP image %s not found", image) + log.Warn("GCP image %s not found", image) return nil } else if err != nil { return fmt.Errorf("deleting image %s: %w", image, err) } - log.Debugf("Waiting for operation to finish") + log.Debug("Waiting for operation to finish") if err := op.Wait(ctx); err != nil { return 
fmt.Errorf("waiting for operation: %w", err) } @@ -624,30 +624,30 @@ var ( azCommunityImageRegex = regexp.MustCompile("^/CommunityGalleries/([[:alnum:]-]+)/Images/([[:alnum:]._-]+)/Versions/([[:alnum:]._-]+)$") ) -func (a *azureClient) deleteImage(ctx context.Context, image string, dryrun bool, log *logger.Logger) error { +func (a *azureClient) deleteImage(ctx context.Context, image string, dryrun bool, log *slog.Logger) error { azImage, err := a.parseImage(ctx, image, log) if err != nil { return err } if dryrun { - log.Debugf("DryRun: delete image %v", azImage) + log.Debug("DryRun: delete image %v", azImage) return nil } - log.Debugf("Deleting image %q, version %q", azImage.imageDefinition, azImage.version) + log.Debug("Deleting image %q, version %q", azImage.imageDefinition, azImage.version) poller, err := a.imageVersions.BeginDelete(ctx, azImage.resourceGroup, azImage.gallery, azImage.imageDefinition, azImage.version, nil) if err != nil { return fmt.Errorf("begin delete image version: %w", err) } - log.Debugf("Waiting for operation to finish") + log.Debug("Waiting for operation to finish") if _, err := poller.PollUntilDone(ctx, nil); err != nil { return fmt.Errorf("waiting for operation: %w", err) } - log.Debugf("Checking if image definition %q still has versions left", azImage.imageDefinition) + log.Debug("Checking if image definition %q still has versions left", azImage.imageDefinition) pager := a.imageVersions.NewListByGalleryImagePager(azImage.resourceGroup, azImage.gallery, azImage.imageDefinition, nil) for pager.More() { @@ -656,20 +656,20 @@ func (a *azureClient) deleteImage(ctx context.Context, image string, dryrun bool return fmt.Errorf("listing image versions of image definition %s: %w", azImage.imageDefinition, err) } if len(nextResult.Value) != 0 { - log.Debugf("Image definition %q still has versions left, won't be deleted", azImage.imageDefinition) + log.Debug("Image definition %q still has versions left, won't be deleted", azImage.imageDefinition) return nil } } time.Sleep(15 * time.Second) // Azure needs time understand that there is no version left... - log.Debugf("Deleting image definition %s", azImage.imageDefinition) + log.Debug("Deleting image definition %s", azImage.imageDefinition) op, err := a.image.BeginDelete(ctx, azImage.resourceGroup, azImage.gallery, azImage.imageDefinition, nil) if err != nil { return fmt.Errorf("deleting image definition %s: %w", azImage.imageDefinition, err) } - log.Debugf("Waiting for operation to finish") + log.Debug("Waiting for operation to finish") if _, err := op.PollUntilDone(ctx, nil); err != nil { return fmt.Errorf("waiting for operation: %w", err) } @@ -684,9 +684,9 @@ type azImage struct { version string } -func (a *azureClient) parseImage(ctx context.Context, image string, log *logger.Logger) (azImage, error) { +func (a *azureClient) parseImage(ctx context.Context, image string, log *slog.Logger) (azImage, error) { if m := azImageRegex.FindStringSubmatch(image); len(m) == 5 { - log.Debugf( + log.Debug( "Image matches local image format, resource group: %s, gallery: %s, image definition: %s, version: %s", m[1], m[2], m[3], m[4], ) @@ -707,7 +707,7 @@ func (a *azureClient) parseImage(ctx context.Context, image string, log *logger. 
imageDefinition := m[2] version := m[3] - log.Debugf( + log.Debug( "Image matches community image format, gallery public name: %s, image definition: %s, version: %s", galleryPublicName, imageDefinition, version, ) @@ -721,24 +721,24 @@ func (a *azureClient) parseImage(ctx context.Context, image string, log *logger. } for _, v := range nextResult.Value { if v.Name == nil { - log.Debugf("Skipping gallery with nil name") + log.Debug("Skipping gallery with nil name") continue } if v.Properties.SharingProfile == nil { - log.Debugf("Skipping gallery %s with nil sharing profile", *v.Name) + log.Debug("Skipping gallery %s with nil sharing profile", *v.Name) continue } if v.Properties.SharingProfile.CommunityGalleryInfo == nil { - log.Debugf("Skipping gallery %s with nil community gallery info", *v.Name) + log.Debug("Skipping gallery %s with nil community gallery info", *v.Name) continue } if v.Properties.SharingProfile.CommunityGalleryInfo.PublicNames == nil { - log.Debugf("Skipping gallery %s with nil public names", *v.Name) + log.Debug("Skipping gallery %s with nil public names", *v.Name) continue } for _, publicName := range v.Properties.SharingProfile.CommunityGalleryInfo.PublicNames { if publicName == nil { - log.Debugf("Skipping nil public name") + log.Debug("Skipping nil public name") continue } if *publicName == galleryPublicName { diff --git a/internal/api/versionsapi/client.go b/internal/api/versionsapi/client.go index 67900ec0e3..9d90c8201b 100644 --- a/internal/api/versionsapi/client.go +++ b/internal/api/versionsapi/client.go @@ -10,13 +10,13 @@ import ( "context" "errors" "fmt" + "log/slog" "path" "golang.org/x/mod/semver" apiclient "github.com/edgelesssys/constellation/v2/internal/api/client" "github.com/edgelesssys/constellation/v2/internal/constants" - "github.com/edgelesssys/constellation/v2/internal/logger" ) // Client is a client for the versions API. @@ -27,7 +27,7 @@ type Client struct { // NewClient creates a new client for the versions API. func NewClient(ctx context.Context, region, bucket, distributionID string, dryRun bool, - log *logger.Logger, + log *slog.Logger, ) (*Client, CloseFunc, error) { genericClient, genericClientClose, err := apiclient.NewClient(ctx, region, bucket, distributionID, dryRun, log) versionsClient := &Client{ @@ -43,7 +43,7 @@ func NewClient(ctx context.Context, region, bucket, distributionID string, dryRu // NewReadOnlyClient creates a new read-only client. // This client can be used to fetch objects but cannot write updates. func NewReadOnlyClient(ctx context.Context, region, bucket, distributionID string, - log *logger.Logger, + log *slog.Logger, ) (*Client, CloseFunc, error) { genericClient, genericClientClose, err := apiclient.NewReadOnlyClient(ctx, region, bucket, distributionID, log) if err != nil { diff --git a/internal/attestation/attestation.go b/internal/attestation/attestation.go index f09988dce8..795fee7269 100644 --- a/internal/attestation/attestation.go +++ b/internal/attestation/attestation.go @@ -45,8 +45,8 @@ const ( // Logger is a logger used to print warnings and infos during attestation validation. type Logger interface { - Infof(format string, args ...any) - Warnf(format string, args ...any) + Info(format string, args ...any) + Warn(format string, args ...any) } // NOPLogger is a no-op implementation of [Logger]. 
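The attestation.Logger interface above is rewritten so its method set matches *slog.Logger (Info and Warn taking a message plus variadic args), which lets callers hand in a *slog.Logger directly with no adapter. A compile-time sketch of that property, assuming only the package path shown in the hunk above:

    package attestation_test

    import (
    	"log/slog"

    	"github.com/edgelesssys/constellation/v2/internal/attestation"
    )

    // *slog.Logger's Info(msg string, args ...any) and Warn(msg string,
    // args ...any) now match the rewritten interface exactly, so this compiles.
    var _ attestation.Logger = (*slog.Logger)(nil)
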
diff --git a/internal/attestation/aws/snp/validator_test.go b/internal/attestation/aws/snp/validator_test.go index 84804a8865..50261781bd 100644 --- a/internal/attestation/aws/snp/validator_test.go +++ b/internal/attestation/aws/snp/validator_test.go @@ -17,6 +17,7 @@ import ( "encoding/pem" "errors" "fmt" + "log/slog" "regexp" "testing" @@ -161,7 +162,7 @@ func TestValidateSNPReport(t *testing.T) { require.NoError(err) v := awsValidator{httpsGetter: newStubHTTPSGetter(&urlResponseMatcher{}, nil), verifier: tc.verifier, validator: tc.validator} - err = v.validate(vtpm.AttestationDocument{InstanceInfo: infoMarshalled}, ask, ark, [64]byte(hash), config.DefaultForAWSSEVSNP(), logger.NewTest(t)) + err = v.validate(vtpm.AttestationDocument{InstanceInfo: infoMarshalled}, ask, ark, [64]byte(hash), config.DefaultForAWSSEVSNP(), slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil))) if tc.wantErr { assert.Error(err) } else { diff --git a/internal/attestation/azure/snp/validator_test.go b/internal/attestation/azure/snp/validator_test.go index 64d6f03e2f..c5bcd5d128 100644 --- a/internal/attestation/azure/snp/validator_test.go +++ b/internal/attestation/azure/snp/validator_test.go @@ -48,7 +48,7 @@ func TestNewValidator(t *testing.T) { }{ "success": { cfg: config.DefaultForAzureSEVSNP(), - logger: logger.NewTest(t), + logger: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), }, "nil logger": { cfg: config.DefaultForAzureSEVSNP(), @@ -127,7 +127,7 @@ func TestCheckIDKeyDigest(t *testing.T) { return report } newTestValidator := func(cfg *config.AzureSEVSNP, validateTokenErr error) *Validator { - validator := NewValidator(cfg, logger.NewTest(t)) + validator := NewValidator(cfg, slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil))) validator.maa = &stubMaaValidator{ validateTokenErr: validateTokenErr, } @@ -644,7 +644,7 @@ func TestTrustedKeyFromSNP(t *testing.T) { validator := &Validator{ hclValidator: &stubAttestationKey{}, config: defaultCfg, - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), getter: tc.getter, attestationVerifier: tc.verifier, attestationValidator: tc.validator, diff --git a/internal/attestation/azure/trustedlaunch/trustedlaunch_test.go b/internal/attestation/azure/trustedlaunch/trustedlaunch_test.go index 023bb785f7..fb55e1e558 100644 --- a/internal/attestation/azure/trustedlaunch/trustedlaunch_test.go +++ b/internal/attestation/azure/trustedlaunch/trustedlaunch_test.go @@ -14,6 +14,7 @@ import ( "crypto/x509" "crypto/x509/pkix" "io" + "log/slog" "net/http" "os" "testing" @@ -189,7 +190,7 @@ func TestGetAttestationCert(t *testing.T) { )) require.NoError(tpm2.NVWrite(tpm, tpm2.HandleOwner, tpmAkCertIdx, "", akCert, 0)) - issuer := NewIssuer(logger.NewTest(t)) + issuer := NewIssuer(slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil))) issuer.hClient = newTestClient(tc.crlServer) certs, err := issuer.getAttestationCert(context.Background(), tpm, nil) diff --git a/internal/attestation/snp/snp_test.go b/internal/attestation/snp/snp_test.go index 0179ac05b4..42437d6a81 100644 --- a/internal/attestation/snp/snp_test.go +++ b/internal/attestation/snp/snp_test.go @@ -266,7 +266,7 @@ func TestAttestationWithCerts(t *testing.T) { } defer trust.ClearProductCertCache() - att, err := instanceInfo.AttestationWithCerts(tc.getter, tc.fallbackCerts, logger.NewTest(t)) + att, err := instanceInfo.AttestationWithCerts(tc.getter, tc.fallbackCerts, slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil))) if tc.wantErr { 
assert.Error(err) } else { diff --git a/internal/attestation/vtpm/attestation_test.go b/internal/attestation/vtpm/attestation_test.go index 9eeeef3b83..fc2924d2ba 100644 --- a/internal/attestation/vtpm/attestation_test.go +++ b/internal/attestation/vtpm/attestation_test.go @@ -84,8 +84,8 @@ func TestValidate(t *testing.T) { tpmOpen, tpmCloser := tpmsim.NewSimulatedTPMOpenFunc() defer tpmCloser.Close() - issuer := NewIssuer(tpmOpen, tpmclient.AttestationKeyRSA, fakeGetInstanceInfo, logger.NewTest(t)) - validator := NewValidator(testExpectedPCRs, fakeGetTrustedKey, fakeValidateCVM, logger.NewTest(t)) + issuer := NewIssuer(tpmOpen, tpmclient.AttestationKeyRSA, fakeGetInstanceInfo, slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil))) + validator := NewValidator(testExpectedPCRs, fakeGetTrustedKey, fakeValidateCVM, slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil))) nonce := []byte{1, 2, 3, 4} challenge := []byte("Constellation") @@ -237,7 +237,7 @@ func TestValidate(t *testing.T) { }, fakeGetTrustedKey, fakeValidateCVM, - logger.NewTest(t)), + slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil))), attDoc: mustMarshalAttestation(attDoc, require), nonce: nonce, wantErr: false, @@ -345,7 +345,7 @@ func TestFailIssuer(t *testing.T) { t.Run(name, func(t *testing.T) { assert := assert.New(t) - tc.issuer.log = logger.NewTest(t) + tc.issuer.log = slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)) _, err := tc.issuer.Issue(context.Background(), tc.userData, tc.nonce) assert.Error(err) diff --git a/internal/cloud/azure/azure.go b/internal/cloud/azure/azure.go index 8d8735e099..52a4684713 100644 --- a/internal/cloud/azure/azure.go +++ b/internal/cloud/azure/azure.go @@ -19,6 +19,7 @@ import ( "context" "errors" "fmt" + "log/slog" "path" "strconv" @@ -29,9 +30,7 @@ import ( "github.com/edgelesssys/constellation/v2/internal/cloud/azureshared" "github.com/edgelesssys/constellation/v2/internal/cloud/metadata" "github.com/edgelesssys/constellation/v2/internal/constants" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/internal/role" - "go.uber.org/zap" "k8s.io/kubernetes/pkg/util/iptables" "k8s.io/utils/exec" ) @@ -455,7 +454,7 @@ func (c *Cloud) getLoadBalancerDNSName(ctx context.Context) (string, error) { // // OpenShift also uses the same mechanism to redirect traffic to the API server: // https://github.com/openshift/machine-config-operator/blob/e453bd20bac0e48afa74e9a27665abaf454d93cd/templates/master/00-master/azure/files/opt-libexec-openshift-azure-routes-sh.yaml -func (c *Cloud) PrepareControlPlaneNode(ctx context.Context, log *logger.Logger) error { +func (c *Cloud) PrepareControlPlaneNode(ctx context.Context, log *slog.Logger) error { selfMetadata, err := c.Self(ctx) if err != nil { return fmt.Errorf("failed to get self metadata: %w", err) @@ -463,7 +462,7 @@ func (c *Cloud) PrepareControlPlaneNode(ctx context.Context, log *logger.Logger) // skipping iptables setup for worker nodes if selfMetadata.Role != role.ControlPlane { - log.Infof("not a control plane node, skipping iptables setup") + log.Info("not a control plane node, skipping iptables setup") return nil } @@ -471,11 +470,11 @@ func (c *Cloud) PrepareControlPlaneNode(ctx context.Context, log *logger.Logger) // for public LB architectures loadbalancerIP, err := c.getLoadBalancerPrivateIP(ctx) if err != nil { - log.With(zap.Error(err)).Warnf("skipping iptables setup, failed to get load balancer private IP") + log.With(slog.Any("error", err)).Warn("skipping 
iptables setup, failed to get load balancer private IP") return nil } - log.Infof("Setting up iptables for control plane node with load balancer IP %s", loadbalancerIP) + log.Info(fmt.Sprintf("Setting up iptables for control plane node with load balancer IP %s", loadbalancerIP)) iptablesExec := iptables.New(exec.New(), iptables.ProtocolIPv4) if err != nil { diff --git a/internal/constellation/apply.go b/internal/constellation/apply.go index 611b1557f1..b887e8e8a4 100644 --- a/internal/constellation/apply.go +++ b/internal/constellation/apply.go @@ -51,7 +51,7 @@ type licenseChecker interface { } type debugLog interface { - Debugf(format string, args ...any) + Debug(format string, args ...any) } // NewApplier creates a new Applier. diff --git a/internal/constellation/apply_test.go b/internal/constellation/apply_test.go index 54e845033b..b5826657cd 100644 --- a/internal/constellation/apply_test.go +++ b/internal/constellation/apply_test.go @@ -37,7 +37,7 @@ func TestCheckLicense(t *testing.T) { t.Run(name, func(t *testing.T) { require := require.New(t) - a := &Applier{licenseChecker: tc.licenseChecker, log: logger.NewTest(t)} + a := &Applier{licenseChecker: tc.licenseChecker, log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil))} _, err := a.CheckLicense(context.Background(), cloudprovider.Unknown, true, license.CommunityLicense) if tc.wantErr { require.Error(err) @@ -58,7 +58,7 @@ func (c *stubLicenseChecker) CheckLicense(context.Context, cloudprovider.Provide func TestGenerateMasterSecret(t *testing.T) { assert := assert.New(t) - a := &Applier{log: logger.NewTest(t)} + a := &Applier{log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil))} sec, err := a.GenerateMasterSecret() assert.NoError(err) assert.Len(sec.Key, crypto.MasterSecretLengthDefault) @@ -67,7 +67,7 @@ func TestGenerateMasterSecret(t *testing.T) { func TestGenerateMeasurementSalt(t *testing.T) { assert := assert.New(t) - a := &Applier{log: logger.NewTest(t)} + a := &Applier{log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil))} salt, err := a.GenerateMeasurementSalt() assert.NoError(err) assert.Len(salt, crypto.RNGLengthDefault) diff --git a/internal/constellation/applyinit_test.go b/internal/constellation/applyinit_test.go index 7d16d5fe74..9e356366fe 100644 --- a/internal/constellation/applyinit_test.go +++ b/internal/constellation/applyinit_test.go @@ -206,7 +206,7 @@ func TestInit(t *testing.T) { defer stop() a := &Applier{ - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), spinner: &nopSpinner{}, newDialer: func(atls.Validator) *dialer.Dialer { return dialer.New(nil, nil, netDialer) @@ -285,7 +285,7 @@ func TestAttestation(t *testing.T) { defer cancel() initer := &Applier{ - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), newDialer: func(v atls.Validator) *dialer.Dialer { return dialer.New(nil, v, netDialer) }, diff --git a/internal/constellation/helm/action.go b/internal/constellation/helm/action.go index 5d73347e08..6d3f5a78b5 100644 --- a/internal/constellation/helm/action.go +++ b/internal/constellation/helm/action.go @@ -37,7 +37,7 @@ type applyAction interface { func newActionConfig(kubeConfig []byte, logger debugLog) (*action.Configuration, error) { actionConfig := &action.Configuration{} if err := actionConfig.Init(&clientGetter{kubeConfig: kubeConfig}, constants.HelmNamespace, - "secret", logger.Debugf); err != nil { + "secret", logger.Debug); err != nil { return nil, err } return actionConfig, 
nil diff --git a/internal/constellation/helm/actionfactory.go b/internal/constellation/helm/actionfactory.go index 4237b77310..f2fb35c045 100644 --- a/internal/constellation/helm/actionfactory.go +++ b/internal/constellation/helm/actionfactory.go @@ -90,15 +90,15 @@ func (a actionFactory) appendNewAction( ) } - a.log.Debugf("release %s not found, adding to new releases...", release.releaseName) + a.log.Debug("release %s not found, adding to new releases...", release.releaseName) *actions = append(*actions, a.newInstall(release, timeout)) return nil } if err != nil { return fmt.Errorf("getting version for %s: %w", release.releaseName, err) } - a.log.Debugf("Current %s version: %s", release.releaseName, currentVersion) - a.log.Debugf("New %s version: %s", release.releaseName, newVersion) + a.log.Debug("Current %s version: %s", release.releaseName, currentVersion) + a.log.Debug("New %s version: %s", release.releaseName, newVersion) if !force { // For charts we package ourselves, the version is equal to the CLI version (charts are embedded in the binary). @@ -132,7 +132,7 @@ func (a actionFactory) appendNewAction( release.releaseName == certManagerInfo.releaseName { return ErrConfirmationMissing } - a.log.Debugf("Upgrading %s from %s to %s", release.releaseName, currentVersion, newVersion) + a.log.Debug("Upgrading %s from %s to %s", release.releaseName, currentVersion, newVersion) *actions = append(*actions, a.newUpgrade(release, timeout)) return nil } @@ -162,7 +162,7 @@ func (a actionFactory) updateCRDs(ctx context.Context, chart *chart.Chart) error for _, dep := range chart.Dependencies() { for _, crdFile := range dep.Files { if strings.HasPrefix(crdFile.Name, "crds/") { - a.log.Debugf("Updating crd: %s", crdFile.Name) + a.log.Debug("Updating crd: %s", crdFile.Name) err := a.kubeClient.ApplyCRD(ctx, crdFile.Data) if err != nil { return err diff --git a/internal/constellation/helm/actionfactory_test.go b/internal/constellation/helm/actionfactory_test.go index 960ea5a520..8dc617d5e0 100644 --- a/internal/constellation/helm/actionfactory_test.go +++ b/internal/constellation/helm/actionfactory_test.go @@ -8,6 +8,7 @@ package helm import ( "errors" + "log/slog" "testing" "time" @@ -240,7 +241,7 @@ func TestAppendNewAction(t *testing.T) { assert := assert.New(t) actions := []applyAction{} - actionFactory := newActionFactory(nil, tc.lister, &action.Configuration{}, logger.NewTest(t)) + actionFactory := newActionFactory(nil, tc.lister, &action.Configuration{}, slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil))) err := actionFactory.appendNewAction(tc.release, tc.configTargetVersion, tc.force, tc.allowDestructive, time.Second, &actions) if tc.wantErr { diff --git a/internal/constellation/helm/helm.go b/internal/constellation/helm/helm.go index 990ec2b2b2..da318d4341 100644 --- a/internal/constellation/helm/helm.go +++ b/internal/constellation/helm/helm.go @@ -53,7 +53,7 @@ const ( ) type debugLog interface { - Debugf(format string, args ...any) + Debug(format string, args ...any) } // Client is a Helm client to apply charts. 
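// NOTE: the debugLog interface above now matches (*slog.Logger).Debug, which,
// unlike zap's Debugf, treats its first argument as a plain message and the
// remaining arguments as key-value attribute pairs. Printf verbs such as %s
// and %q in the migrated call sites are therefore no longer interpolated.
// A minimal sketch of the two idiomatic fixes, assuming an *slog.Logger named
// log and illustrative variables name and version:
//
//	log.Debug(fmt.Sprintf("Current %s version: %s", name, version)) // formatted message
//	log.Debug("found release", "name", name, "version", version)    // structured attributes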
@@ -102,7 +102,7 @@ func (h Client) PrepareApply( return nil, false, fmt.Errorf("loading Helm releases: %w", err) } - h.log.Debugf("Loaded Helm releases") + h.log.Debug("Loaded Helm releases") actions, includesUpgrades, err := h.factory.GetActions( releases, flags.MicroserviceVersion, flags.Force, flags.AllowDestructive, flags.ApplyTimeout, ) @@ -114,7 +114,7 @@ func (h Client) loadReleases( stateFile *state.State, flags Options, serviceAccURI string, openStackCfg *config.OpenStackConfig, ) ([]release, error) { helmLoader := newLoader(csp, attestationVariant, k8sVersion, stateFile, h.cliVersion) - h.log.Debugf("Created new Helm loader") + h.log.Debug("Created new Helm loader") return helmLoader.loadReleases(flags.Conformance, flags.DeployCSIDriver, flags.HelmWaitMode, secret, serviceAccURI, openStackCfg) } @@ -133,7 +133,7 @@ type ChartApplyExecutor struct { // Apply applies the charts in order. func (c ChartApplyExecutor) Apply(ctx context.Context) error { for _, action := range c.actions { - c.log.Debugf("Applying %q", action.ReleaseName()) + c.log.Debug("Applying %q", action.ReleaseName()) if err := action.Apply(ctx); err != nil { return fmt.Errorf("applying %s: %w", action.ReleaseName(), err) } diff --git a/internal/constellation/helm/helm_test.go b/internal/constellation/helm/helm_test.go index aed7689d00..42d55ce0a9 100644 --- a/internal/constellation/helm/helm_test.go +++ b/internal/constellation/helm/helm_test.go @@ -8,6 +8,7 @@ package helm import ( "errors" + "log/slog" "testing" "github.com/edgelesssys/constellation/v2/internal/attestation/variant" @@ -173,7 +174,7 @@ func TestHelmApply(t *testing.T) { }, } - log := logger.NewTest(t) + log := slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)) options := Options{ DeployCSIDriver: true, Conformance: false, diff --git a/internal/constellation/helm/retryaction.go b/internal/constellation/helm/retryaction.go index ca61944b09..721645fd0e 100644 --- a/internal/constellation/helm/retryaction.go +++ b/internal/constellation/helm/retryaction.go @@ -49,7 +49,7 @@ func retryApply(ctx context.Context, action retrieableApplier, retryInterval tim return fmt.Errorf("helm install: %w", err) } retryLoopFinishDuration := time.Since(retryLoopStartTime) - log.Debugf("Helm chart %q installation finished after %s", action.ReleaseName(), retryLoopFinishDuration) + log.Debug("Helm chart %q installation finished after %s", action.ReleaseName(), retryLoopFinishDuration) return nil } @@ -61,9 +61,9 @@ type applyDoer struct { // Do tries to apply the action. 
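// A single attempt is made per call; retryApply above drives Do through an
// interval retrier, sleeping retryInterval between failed attempts until the
// apply succeeds or the context is cancelled.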
func (i applyDoer) Do(ctx context.Context) error { - i.log.Debugf("Trying to apply Helm chart %s", i.applier.ReleaseName()) + i.log.Debug("Trying to apply Helm chart %s", i.applier.ReleaseName()) if err := i.applier.apply(ctx); err != nil { - i.log.Debugf("Helm chart installation %s failed: %v", i.applier.ReleaseName(), err) + i.log.Debug("Helm chart installation %s failed: %v", i.applier.ReleaseName(), err) return err } diff --git a/internal/constellation/helm/retryaction_test.go b/internal/constellation/helm/retryaction_test.go index 6a39d7cb26..c228e10e1c 100644 --- a/internal/constellation/helm/retryaction_test.go +++ b/internal/constellation/helm/retryaction_test.go @@ -8,6 +8,7 @@ package helm import ( "context" + "log/slog" "testing" "time" @@ -64,7 +65,7 @@ func TestRetryApply(t *testing.T) { t.Run(name, func(t *testing.T) { assert := assert.New(t) - err := retryApply(context.Background(), tc.applier, time.Millisecond, logger.NewTest(t)) + err := retryApply(context.Background(), tc.applier, time.Millisecond, slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil))) if tc.wantErr { assert.Error(err) } else { diff --git a/internal/constellation/kubecmd/kubecmd.go b/internal/constellation/kubecmd/kubecmd.go index d2ea0d7f4e..07ed020374 100644 --- a/internal/constellation/kubecmd/kubecmd.go +++ b/internal/constellation/kubecmd/kubecmd.go @@ -93,7 +93,7 @@ func (k *KubeCmd) UpgradeNodeImage(ctx context.Context, imageVersion semver.Semv return err } - k.log.Debugf("Checking if image upgrade is valid") + k.log.Debug("Checking if image upgrade is valid") var upgradeErr *compatibility.InvalidUpgradeError err = k.isValidImageUpgrade(nodeVersion, imageVersion.String(), force) switch { @@ -214,20 +214,20 @@ func (k *KubeCmd) ApplyJoinConfig(ctx context.Context, newAttestConfig config.At return fmt.Errorf("getting %s ConfigMap: %w", constants.JoinConfigMap, err) } - k.log.Debugf("ConfigMap %q does not exist in namespace %q, creating it now", constants.JoinConfigMap, constants.ConstellationNamespace) + k.log.Debug("ConfigMap %q does not exist in namespace %q, creating it now", constants.JoinConfigMap, constants.ConstellationNamespace) if err := retryAction(ctx, k.retryInterval, maxRetryAttempts, func(ctx context.Context) error { return k.kubectl.CreateConfigMap(ctx, joinConfigMap(newConfigJSON, measurementSalt)) }, k.log); err != nil { return fmt.Errorf("creating join-config ConfigMap: %w", err) } - k.log.Debugf("Created %q ConfigMap in namespace %q", constants.JoinConfigMap, constants.ConstellationNamespace) + k.log.Debug("Created %q ConfigMap in namespace %q", constants.JoinConfigMap, constants.ConstellationNamespace) return nil } // create backup of previous config joinConfig.Data[constants.AttestationConfigFilename+"_backup"] = joinConfig.Data[constants.AttestationConfigFilename] joinConfig.Data[constants.AttestationConfigFilename] = string(newConfigJSON) - k.log.Debugf("Triggering attestation config update now") + k.log.Debug("Triggering attestation config update now") if err := retryAction(ctx, k.retryInterval, maxRetryAttempts, func(ctx context.Context) error { _, err = k.kubectl.UpdateConfigMap(ctx, joinConfig) return err @@ -263,10 +263,10 @@ func (k *KubeCmd) ExtendClusterConfigCertSANs(ctx context.Context, alternativeNa } if len(missingSANs) == 0 { - k.log.Debugf("No new SANs to add to the cluster's apiserver SAN field") + k.log.Debug("No new SANs to add to the cluster's apiserver SAN field") return nil } - k.log.Debugf("Extending the cluster's apiserver SAN field with the 
following SANs: %s\n", strings.Join(missingSANs, ", ")) + k.log.Debug("Extending the cluster's apiserver SAN field with the following SANs: %s\n", strings.Join(missingSANs, ", ")) clusterConfiguration.APIServer.CertSANs = append(clusterConfiguration.APIServer.CertSANs, missingSANs...) sort.Strings(clusterConfiguration.APIServer.CertSANs) @@ -277,12 +277,12 @@ func (k *KubeCmd) ExtendClusterConfigCertSANs(ctx context.Context, alternativeNa } kubeadmConfig.Data[constants.ClusterConfigurationKey] = string(newConfigYAML) - k.log.Debugf("Triggering kubeadm config update now") + k.log.Debug("Triggering kubeadm config update now") if _, err = k.kubectl.UpdateConfigMap(ctx, kubeadmConfig); err != nil { return fmt.Errorf("setting new kubeadm config: %w", err) } - k.log.Debugf("Successfully extended the cluster's apiserver SAN field") + k.log.Debug("Successfully extended the cluster's apiserver SAN field") return nil } @@ -345,7 +345,7 @@ func (k *KubeCmd) applyComponentsCM(ctx context.Context, components *corev1.Conf } func (k *KubeCmd) applyNodeVersion(ctx context.Context, nodeVersion updatev1alpha1.NodeVersion) (updatev1alpha1.NodeVersion, error) { - k.log.Debugf("Triggering NodeVersion upgrade now") + k.log.Debug("Triggering NodeVersion upgrade now") var updatedNodeVersion updatev1alpha1.NodeVersion err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { newNode, err := k.getConstellationVersion(ctx) @@ -409,7 +409,7 @@ func (k *KubeCmd) prepareUpdateK8s(nodeVersion *updatev1alpha1.NodeVersion, newC } } - k.log.Debugf("Updating local copy of nodeVersion Kubernetes version from %s to %s", nodeVersion.Spec.KubernetesClusterVersion, newClusterVersion) + k.log.Debug("Updating local copy of nodeVersion Kubernetes version from %s to %s", nodeVersion.Spec.KubernetesClusterVersion, newClusterVersion) nodeVersion.Spec.KubernetesComponentsReference = configMap.ObjectMeta.Name nodeVersion.Spec.KubernetesClusterVersion = newClusterVersion @@ -461,7 +461,7 @@ func retryGetJoinConfig(ctx context.Context, kubectl kubectlInterface, retryInte return false } retries++ - log.Debugf("Getting join-config ConfigMap failed (attempt %d/%d): %s", retries, maxRetryAttempts, err) + log.Debug("Getting join-config ConfigMap failed (attempt %d/%d): %s", retries, maxRetryAttempts, err) return retries < maxRetryAttempts } @@ -483,7 +483,7 @@ func retryAction(ctx context.Context, retryInterval time.Duration, maxRetries in ctr := 0 retrier := conretry.NewIntervalRetrier(&kubeDoer{action: action}, retryInterval, func(err error) bool { ctr++ - log.Debugf("Action failed (attempt %d/%d): %s", ctr, maxRetries, err) + log.Debug("Action failed (attempt %d/%d): %s", ctr, maxRetries, err) return ctr < maxRetries }) return retrier.Do(ctx) @@ -502,5 +502,5 @@ type kubectlInterface interface { } type debugLog interface { - Debugf(format string, args ...any) + Debug(format string, args ...any) } diff --git a/internal/constellation/kubecmd/kubecmd_test.go b/internal/constellation/kubecmd/kubecmd_test.go index cdaf999215..54b151c309 100644 --- a/internal/constellation/kubecmd/kubecmd_test.go +++ b/internal/constellation/kubecmd/kubecmd_test.go @@ -175,7 +175,7 @@ func TestUpgradeNodeImage(t *testing.T) { upgrader := KubeCmd{ kubectl: kubectl, - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), } err = upgrader.UpgradeNodeImage(context.Background(), tc.newImageVersion, fmt.Sprintf("/path/to/image:%s", tc.newImageVersion.String()), tc.force) @@ -286,7 +286,7 @@ func 
TestUpgradeKubernetesVersion(t *testing.T) { upgrader := KubeCmd{ kubectl: kubectl, - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), } err = upgrader.UpgradeKubernetesVersion(context.Background(), tc.newKubernetesVersion, tc.force) @@ -341,7 +341,7 @@ func TestIsValidImageUpgrade(t *testing.T) { assert := assert.New(t) upgrader := &KubeCmd{ - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), } nodeVersion := updatev1alpha1.NodeVersion{ @@ -392,7 +392,7 @@ func TestUpdateK8s(t *testing.T) { assert := assert.New(t) upgrader := &KubeCmd{ - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), } nodeVersion := updatev1alpha1.NodeVersion{ @@ -587,7 +587,7 @@ func TestApplyJoinConfig(t *testing.T) { cmd := &KubeCmd{ kubectl: tc.kubectl, - log: logger.NewTest(t), + log: slog.New(slog.NewTextHandler(logger.TestWriter{T: t}, nil)), retryInterval: time.Millisecond, } diff --git a/internal/logger/grpclogger.go b/internal/logger/grpclogger.go index 3381df30ad..c8f7537874 100644 --- a/internal/logger/grpclogger.go +++ b/internal/logger/grpclogger.go @@ -8,21 +8,22 @@ package logger import ( "fmt" + "log/slog" - "go.uber.org/zap" "google.golang.org/grpc/grpclog" ) -func replaceGRPCLogger(log *zap.Logger) { +func replaceGRPCLogger(log *slog.Logger) { gl := &grpcLogger{ - logger: log.With(zap.String("system", "grpc"), zap.Bool("grpc_log", true)).WithOptions(zap.AddCallerSkip(2)), + // TODO(miampf): Find a way to permanently skip two callers with slog + logger: log.With(slog.String("system", "grpc"), slog.Bool("grpc_log", true)), // .WithOptions(zap.AddCallerSkip(2)), verbosity: 0, } grpclog.SetLoggerV2(gl) } type grpcLogger struct { - logger *zap.Logger + logger *slog.Logger verbosity int } diff --git a/internal/logger/log.go b/internal/logger/log.go index 2c2b333665..b226cbe65b 100644 --- a/internal/logger/log.go +++ b/internal/logger/log.go @@ -156,8 +156,8 @@ func (l *Logger) Named(name string) *Logger { } // ReplaceGRPCLogger replaces grpc's internal logger with the given logger. -func (l *Logger) ReplaceGRPCLogger() { - replaceGRPCLogger(l.getZapLogger()) +func ReplaceGRPCLogger(l *slog.Logger) { + replaceGRPCLogger(l) } // GetServerUnaryInterceptor returns a gRPC server option for intercepting unary gRPC logs. @@ -234,10 +234,10 @@ func middlewareLogger(l *slog.Logger) logging.Logger { } type TestWriter struct { - t *testing.T + T *testing.T } func (t TestWriter) Write(p []byte) (int, error) { - t.t.Log(p) + t.T.Log(p) return len(p), nil } diff --git a/internal/osimage/archive/archive.go b/internal/osimage/archive/archive.go index b31f202027..1f978bb51b 100644 --- a/internal/osimage/archive/archive.go +++ b/internal/osimage/archive/archive.go @@ -10,6 +10,7 @@ package archive import ( "context" "io" + "log/slog" "net/url" "time" @@ -18,7 +19,6 @@ import ( s3types "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/edgelesssys/constellation/v2/internal/api/versionsapi" "github.com/edgelesssys/constellation/v2/internal/constants" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/internal/staticupload" ) @@ -29,11 +29,11 @@ type Archivist struct { // bucket is the name of the S3 bucket to use. bucket string - log *logger.Logger + log *slog.Logger } // New creates a new Archivist. 
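// The returned CloseFunc releases the underlying static upload client and
// should be called once the Archivist is no longer needed. A sketch of a
// typical call, with illustrative region, bucket, and CloudFront
// distribution ID values:
//
//	archivist, closeFn, err := archive.New(ctx, "eu-central-1", "constellation-images", "EDFDVBD6EXAMPLE", log)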
-func New(ctx context.Context, region, bucket, distributionID string, log *logger.Logger) (*Archivist, CloseFunc, error) { +func New(ctx context.Context, region, bucket, distributionID string, log *slog.Logger) (*Archivist, CloseFunc, error) { staticUploadClient, staticUploadClientClose, err := staticupload.New(ctx, staticupload.Config{ Region: region, Bucket: bucket, @@ -73,7 +73,7 @@ func (a *Archivist) Archive(ctx context.Context, version versionsapi.Version, cs if err != nil { return "", err } - a.log.Debugf("Archiving OS image %s %s %v to s3://%v/%v", csp, attestationVariant, version.ShortPath(), a.bucket, key) + a.log.Debug("Archiving OS image %s %s %v to s3://%v/%v", csp, attestationVariant, version.ShortPath(), a.bucket, key) _, err = a.uploadClient.Upload(ctx, &s3.PutObjectInput{ Bucket: &a.bucket, Key: &key, diff --git a/internal/osimage/aws/awsupload.go b/internal/osimage/aws/awsupload.go new file mode 100644 index 0000000000..9ae85da969 --- /dev/null +++ b/internal/osimage/aws/awsupload.go @@ -0,0 +1,603 @@ +/* +Copyright (c) Edgeless Systems GmbH + +SPDX-License-Identifier: AGPL-3.0-only +*/ + +// package aws implements uploading os images to aws. +package aws + +import ( + "context" + "errors" + "fmt" + "io" + "log/slog" + "time" + + awsconfig "github.com/aws/aws-sdk-go-v2/config" + s3manager "github.com/aws/aws-sdk-go-v2/feature/s3/manager" + "github.com/aws/aws-sdk-go-v2/service/ec2" + ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + s3types "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go" + + "github.com/edgelesssys/constellation/v2/internal/api/versionsapi" + "github.com/edgelesssys/constellation/v2/internal/osimage" + "github.com/edgelesssys/constellation/v2/internal/osimage/secureboot" +) + +// Uploader can upload and remove os images on GCP. +type Uploader struct { + region string + bucketName string + ec2 func(ctx context.Context, region string) (ec2API, error) + s3 func(ctx context.Context, region string) (s3API, error) + s3uploader func(ctx context.Context, region string) (s3UploaderAPI, error) + + log *slog.Logger +} + +// New creates a new Uploader. +func New(region, bucketName string, log *slog.Logger) (*Uploader, error) { + return &Uploader{ + region: region, + bucketName: bucketName, + ec2: func(ctx context.Context, region string) (ec2API, error) { + cfg, err := awsconfig.LoadDefaultConfig(ctx, awsconfig.WithRegion(region)) + if err != nil { + return nil, err + } + return ec2.NewFromConfig(cfg), nil + }, + s3: func(ctx context.Context, region string) (s3API, error) { + cfg, err := awsconfig.LoadDefaultConfig(ctx, awsconfig.WithRegion(region)) + if err != nil { + return nil, err + } + return s3.NewFromConfig(cfg), nil + }, + s3uploader: func(ctx context.Context, region string) (s3UploaderAPI, error) { + cfg, err := awsconfig.LoadDefaultConfig(ctx, awsconfig.WithRegion(region)) + if err != nil { + return nil, err + } + return s3manager.NewUploader(s3.NewFromConfig(cfg)), nil + }, + + log: log, + }, nil +} + +// Upload uploads an OS image to AWS. 
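+// The upload is a multi-step pipeline: ensure the S3 bucket exists, delete
+// any leftover image, snapshot, or blob with the same name, upload the raw
+// image as a blob, import it as an EBS snapshot, register an AMI from the
+// snapshot in the primary region, copy the AMI to all replication regions,
+// and finally wait for, tag, and (optionally) publish each copy. The
+// temporary S3 blob is removed again in a deferred cleanup step.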
+func (u *Uploader) Upload(ctx context.Context, req *osimage.UploadRequest) ([]versionsapi.ImageInfoEntry, error) {
+	blobName := fmt.Sprintf("image-%s-%s-%d.raw", req.Version.Stream(), req.Version.Version(), req.Timestamp.Unix())
+	imageName := imageName(req.Version, req.AttestationVariant, req.Timestamp)
+	allRegions := []string{u.region}
+	allRegions = append(allRegions, replicationRegions...)
+	// TODO(malt3): make this configurable
+	publish := true
+	amiIDs := make(map[string]string, len(allRegions))
+	if err := u.ensureBucket(ctx); err != nil {
+		return nil, fmt.Errorf("ensuring bucket %s exists: %w", u.bucketName, err)
+	}
+
+	// pre-cleaning
+	for _, region := range allRegions {
+		if err := u.ensureImageDeleted(ctx, imageName, region); err != nil {
+			return nil, fmt.Errorf("pre-cleaning: ensuring no image under the name %s in region %s: %w", imageName, region, err)
+		}
+	}
+	if err := u.ensureSnapshotDeleted(ctx, imageName, u.region); err != nil {
+		return nil, fmt.Errorf("pre-cleaning: ensuring no snapshot using the same name exists: %w", err)
+	}
+	if err := u.ensureBlobDeleted(ctx, blobName); err != nil {
+		return nil, fmt.Errorf("pre-cleaning: ensuring no blob using the same name exists: %w", err)
+	}
+
+	// create primary image
+	if err := u.uploadBlob(ctx, blobName, req.Image); err != nil {
+		return nil, fmt.Errorf("uploading image to s3: %w", err)
+	}
+	defer func() {
+		if err := u.ensureBlobDeleted(ctx, blobName); err != nil {
+			// slog.Logger has no Errorf method; log the error as an attribute instead.
+			u.log.Error("post-cleaning: deleting temporary blob from s3", "error", err)
+		}
+	}()
+	snapshotID, err := u.importSnapshot(ctx, blobName, imageName)
+	if err != nil {
+		return nil, fmt.Errorf("importing snapshot: %w", err)
+	}
+	primaryAMIID, err := u.createImageFromSnapshot(ctx, req.Version, imageName, snapshotID, req.SecureBoot, req.UEFIVarStore)
+	if err != nil {
+		return nil, fmt.Errorf("creating image from snapshot: %w", err)
+	}
+	amiIDs[u.region] = primaryAMIID
+	if err := u.waitForImage(ctx, primaryAMIID, u.region); err != nil {
+		return nil, fmt.Errorf("waiting for primary image to become available: %w", err)
+	}
+
+	// replicate image
+	for _, region := range replicationRegions {
+		amiID, err := u.replicateImage(ctx, imageName, primaryAMIID, region)
+		if err != nil {
+			return nil, fmt.Errorf("replicating image to region %s: %w", region, err)
+		}
+		amiIDs[region] = amiID
+	}
+
+	// wait for replication, tag, publish
+	var imageInfo []versionsapi.ImageInfoEntry
+	for _, region := range allRegions {
+		if err := u.waitForImage(ctx, amiIDs[region], region); err != nil {
+			return nil, fmt.Errorf("waiting for image to become available in region %s: %w", region, err)
+		}
+		if err := u.tagImageAndSnapshot(ctx, imageName, amiIDs[region], region); err != nil {
+			return nil, fmt.Errorf("tagging image in region %s: %w", region, err)
+		}
+		if !publish {
+			continue
+		}
+		if err := u.publishImage(ctx, amiIDs[region], region); err != nil {
+			return nil, fmt.Errorf("publishing image in region %s: %w", region, err)
+		}
+		imageInfo = append(imageInfo, versionsapi.ImageInfoEntry{
+			CSP:                "aws",
+			AttestationVariant: req.AttestationVariant,
+			Reference:          amiIDs[region],
+			Region:             region,
+		})
+	}
+
+	return imageInfo, nil
+}
+
+func (u *Uploader) ensureBucket(ctx context.Context) error {
+	s3C, err := u.s3(ctx, u.region)
+	if err != nil {
+		return fmt.Errorf("determining if bucket %s exists: %w", u.bucketName, err)
+	}
+	_, err = s3C.HeadBucket(ctx, &s3.HeadBucketInput{
+		Bucket: &u.bucketName,
+	})
+	if err == nil {
+		u.log.Debug("Bucket %s exists", u.bucketName)
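+		// NOTE: with slog this logs the literal string "Bucket %s exists" and
+		// attaches u.bucketName as a key-less attribute; a structured sketch of
+		// the intended message would be:
+		//
+		//	u.log.Debug("bucket exists", "bucket", u.bucketName)
+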
return nil + } + var noSuchBucketErr *types.NoSuchBucket + if !errors.As(err, &noSuchBucketErr) { + return fmt.Errorf("determining if bucket %s exists: %w", u.bucketName, err) + } + u.log.Debug("Creating bucket %s", u.bucketName) + _, err = s3C.CreateBucket(ctx, &s3.CreateBucketInput{ + Bucket: &u.bucketName, + }) + if err != nil { + return fmt.Errorf("creating bucket %s: %w", u.bucketName, err) + } + return nil +} + +func (u *Uploader) uploadBlob(ctx context.Context, blobName string, img io.Reader) error { + u.log.Debug("Uploading os image as %s", blobName) + uploadC, err := u.s3uploader(ctx, u.region) + if err != nil { + return err + } + _, err = uploadC.Upload(ctx, &s3.PutObjectInput{ + Bucket: &u.bucketName, + Key: &blobName, + Body: img, + ChecksumAlgorithm: s3types.ChecksumAlgorithmSha256, + }) + return err +} + +func (u *Uploader) ensureBlobDeleted(ctx context.Context, blobName string) error { + s3C, err := u.s3(ctx, u.region) + if err != nil { + return err + } + _, err = s3C.HeadObject(ctx, &s3.HeadObjectInput{ + Bucket: &u.bucketName, + Key: &blobName, + }) + var apiError smithy.APIError + if errors.As(err, &apiError) && apiError.ErrorCode() == "NotFound" { + u.log.Debug("Blob %s in %s doesn't exist. Nothing to clean up.", blobName, u.bucketName) + return nil + } + if err != nil { + return err + } + u.log.Debug("Deleting blob %s", blobName) + _, err = s3C.DeleteObject(ctx, &s3.DeleteObjectInput{ + Bucket: &u.bucketName, + Key: &blobName, + }) + return err +} + +func (u *Uploader) findSnapshots(ctx context.Context, snapshotName, region string) ([]string, error) { + ec2C, err := u.ec2(ctx, region) + if err != nil { + return nil, fmt.Errorf("creating ec2 client: %w", err) + } + snapshots, err := ec2C.DescribeSnapshots(ctx, &ec2.DescribeSnapshotsInput{ + Filters: []ec2types.Filter{ + { + Name: toPtr("tag:Name"), + Values: []string{snapshotName}, + }, + }, + }) + if err != nil { + return nil, fmt.Errorf("describing snapshots: %w", err) + } + var snapshotIDs []string + for _, s := range snapshots.Snapshots { + if s.SnapshotId == nil { + continue + } + snapshotIDs = append(snapshotIDs, *s.SnapshotId) + } + return snapshotIDs, nil +} + +func (u *Uploader) importSnapshot(ctx context.Context, blobName, snapshotName string) (string, error) { + u.log.Debug("Importing %s as snapshot %s", blobName, snapshotName) + ec2C, err := u.ec2(ctx, u.region) + if err != nil { + return "", fmt.Errorf("creating ec2 client: %w", err) + } + importResp, err := ec2C.ImportSnapshot(ctx, &ec2.ImportSnapshotInput{ + ClientData: &ec2types.ClientData{ + Comment: &snapshotName, + }, + Description: &snapshotName, + DiskContainer: &ec2types.SnapshotDiskContainer{ + Description: &snapshotName, + Format: toPtr(string(ec2types.DiskImageFormatRaw)), + UserBucket: &ec2types.UserBucket{ + S3Bucket: &u.bucketName, + S3Key: &blobName, + }, + }, + }) + if err != nil { + return "", fmt.Errorf("importing snapshot: %w", err) + } + if importResp.ImportTaskId == nil { + return "", fmt.Errorf("importing snapshot: no import task ID returned") + } + u.log.Debug("Waiting for snapshot %s to be ready", snapshotName) + return waitForSnapshotImport(ctx, ec2C, *importResp.ImportTaskId) +} + +func (u *Uploader) ensureSnapshotDeleted(ctx context.Context, snapshotName, region string) error { + ec2C, err := u.ec2(ctx, region) + if err != nil { + return fmt.Errorf("creating ec2 client: %w", err) + } + snapshots, err := u.findSnapshots(ctx, snapshotName, region) + if err != nil { + return fmt.Errorf("finding snapshots: %w", err) + } + for _, 
snapshot := range snapshots { + u.log.Debug("Deleting snapshot %s in %s", snapshot, region) + _, err = ec2C.DeleteSnapshot(ctx, &ec2.DeleteSnapshotInput{ + SnapshotId: toPtr(snapshot), + }) + if err != nil { + return fmt.Errorf("deleting snapshot %s: %w", snapshot, err) + } + } + return nil +} + +func (u *Uploader) createImageFromSnapshot(ctx context.Context, version versionsapi.Version, imageName, snapshotID string, enableSecureBoot bool, uefiVarStore secureboot.UEFIVarStore) (string, error) { + u.log.Debug("Creating image %s in %s", imageName, u.region) + ec2C, err := u.ec2(ctx, u.region) + if err != nil { + return "", fmt.Errorf("creating ec2 client: %w", err) + } + var uefiData *string + if enableSecureBoot { + awsUEFIData, err := uefiVarStore.ToAWS() + if err != nil { + return "", fmt.Errorf("creating uefi data: %w", err) + } + uefiData = toPtr(awsUEFIData) + } + + createReq, err := ec2C.RegisterImage(ctx, &ec2.RegisterImageInput{ + Name: &imageName, + Architecture: ec2types.ArchitectureValuesX8664, + BlockDeviceMappings: []ec2types.BlockDeviceMapping{ + { + DeviceName: toPtr("/dev/xvda"), + Ebs: &ec2types.EbsBlockDevice{ + DeleteOnTermination: toPtr(true), + SnapshotId: &snapshotID, + }, + }, + }, + BootMode: ec2types.BootModeValuesUefi, + Description: toPtr("Constellation " + version.ShortPath()), + EnaSupport: toPtr(true), + RootDeviceName: toPtr("/dev/xvda"), + TpmSupport: ec2types.TpmSupportValuesV20, + UefiData: uefiData, + VirtualizationType: toPtr("hvm"), + }) + if err != nil { + return "", fmt.Errorf("creating image: %w", err) + } + if createReq.ImageId == nil { + return "", fmt.Errorf("creating image: no image ID returned") + } + return *createReq.ImageId, nil +} + +func (u *Uploader) replicateImage(ctx context.Context, imageName, amiID string, region string) (string, error) { + u.log.Debug("Replicating image %s to %s", imageName, region) + ec2C, err := u.ec2(ctx, region) + if err != nil { + return "", fmt.Errorf("creating ec2 client: %w", err) + } + replicateReq, err := ec2C.CopyImage(ctx, &ec2.CopyImageInput{ + Name: &imageName, + SourceImageId: &amiID, + SourceRegion: &u.region, + }) + if err != nil { + return "", fmt.Errorf("replicating image: %w", err) + } + if replicateReq.ImageId == nil { + return "", fmt.Errorf("replicating image: no image ID returned") + } + return *replicateReq.ImageId, nil +} + +func (u *Uploader) findImage(ctx context.Context, imageName, region string) (string, error) { + ec2C, err := u.ec2(ctx, region) + if err != nil { + return "", fmt.Errorf("creating ec2 client: %w", err) + } + snapshots, err := ec2C.DescribeImages(ctx, &ec2.DescribeImagesInput{ + Filters: []ec2types.Filter{ + { + Name: toPtr("name"), + Values: []string{imageName}, + }, + }, + }) + if err != nil { + return "", fmt.Errorf("describing images: %w", err) + } + if len(snapshots.Images) == 0 { + return "", errAMIDoesNotExist + } + if len(snapshots.Images) != 1 { + return "", fmt.Errorf("expected 1 image, got %d", len(snapshots.Images)) + } + if snapshots.Images[0].ImageId == nil { + return "", fmt.Errorf("image ID is nil") + } + return *snapshots.Images[0].ImageId, nil +} + +func (u *Uploader) waitForImage(ctx context.Context, amiID, region string) error { + u.log.Debug("Waiting for image %s in %s to be created", amiID, region) + ec2C, err := u.ec2(ctx, region) + if err != nil { + return fmt.Errorf("creating ec2 client: %w", err) + } + waiter := ec2.NewImageAvailableWaiter(ec2C) + err = waiter.Wait(ctx, &ec2.DescribeImagesInput{ + ImageIds: []string{amiID}, + }, maxWait) + if err 
!= nil { + return fmt.Errorf("waiting for image: %w", err) + } + return nil +} + +func (u *Uploader) tagImageAndSnapshot(ctx context.Context, imageName, amiID, region string) error { + u.log.Debug("Tagging backing snapshot of image %s in %s", amiID, region) + ec2C, err := u.ec2(ctx, region) + if err != nil { + return fmt.Errorf("creating ec2 client: %w", err) + } + snapshotID, err := getBackingSnapshotID(ctx, ec2C, amiID) + if err != nil { + return fmt.Errorf("getting backing snapshot ID: %w", err) + } + _, err = ec2C.CreateTags(ctx, &ec2.CreateTagsInput{ + Resources: []string{amiID, snapshotID}, + Tags: []ec2types.Tag{ + { + Key: toPtr("Name"), + Value: toPtr(imageName), + }, + }, + }) + if err != nil { + return fmt.Errorf("tagging ami and snapshot: %w", err) + } + return nil +} + +func (u *Uploader) publishImage(ctx context.Context, imageName, region string) error { + u.log.Debug("Publishing image %s in %s", imageName, region) + ec2C, err := u.ec2(ctx, region) + if err != nil { + return fmt.Errorf("creating ec2 client: %w", err) + } + _, err = ec2C.ModifyImageAttribute(ctx, &ec2.ModifyImageAttributeInput{ + ImageId: &imageName, + LaunchPermission: &ec2types.LaunchPermissionModifications{ + Add: []ec2types.LaunchPermission{ + { + Group: ec2types.PermissionGroupAll, + }, + }, + }, + }) + if err != nil { + return fmt.Errorf("publishing image: %w", err) + } + return nil +} + +func (u *Uploader) ensureImageDeleted(ctx context.Context, imageName, region string) error { + ec2C, err := u.ec2(ctx, region) + if err != nil { + return fmt.Errorf("creating ec2 client: %w", err) + } + amiID, err := u.findImage(ctx, imageName, region) + if err == errAMIDoesNotExist { + u.log.Debug("Image %s in %s doesn't exist. Nothing to clean up.", imageName, region) + return nil + } + snapshotID, err := getBackingSnapshotID(ctx, ec2C, amiID) + if err == errAMIDoesNotExist { + u.log.Debug("Image %s doesn't exist. 
Nothing to clean up.", amiID) + return nil + } + u.log.Debug("Deleting image %s in %s with backing snapshot", amiID, region) + _, err = ec2C.DeregisterImage(ctx, &ec2.DeregisterImageInput{ + ImageId: &amiID, + }) + if err != nil { + return fmt.Errorf("deleting image: %w", err) + } + _, err = ec2C.DeleteSnapshot(ctx, &ec2.DeleteSnapshotInput{ + SnapshotId: &snapshotID, + }) + if err != nil { + return fmt.Errorf("deleting snapshot: %w", err) + } + return nil +} + +func imageName(version versionsapi.Version, attestationVariant string, timestamp time.Time) string { + if version.Stream() == "stable" { + return fmt.Sprintf("constellation-%s-%s", version.Version(), attestationVariant) + } + return fmt.Sprintf("constellation-%s-%s-%s-%s", version.Stream(), version.Version(), attestationVariant, timestamp.Format(timestampFormat)) +} + +func waitForSnapshotImport(ctx context.Context, ec2C ec2API, importTaskID string) (string, error) { + for { + taskResp, err := ec2C.DescribeImportSnapshotTasks(ctx, &ec2.DescribeImportSnapshotTasksInput{ + ImportTaskIds: []string{importTaskID}, + }) + if err != nil { + return "", fmt.Errorf("describing import snapshot task: %w", err) + } + if len(taskResp.ImportSnapshotTasks) == 0 { + return "", fmt.Errorf("describing import snapshot task: no tasks returned") + } + if taskResp.ImportSnapshotTasks[0].SnapshotTaskDetail == nil { + return "", fmt.Errorf("describing import snapshot task: no snapshot task detail returned") + } + if taskResp.ImportSnapshotTasks[0].SnapshotTaskDetail.Status == nil { + return "", fmt.Errorf("describing import snapshot task: no status returned") + } + switch *taskResp.ImportSnapshotTasks[0].SnapshotTaskDetail.Status { + case string(ec2types.SnapshotStateCompleted): + return *taskResp.ImportSnapshotTasks[0].SnapshotTaskDetail.SnapshotId, nil + case string(ec2types.SnapshotStateError): + return "", fmt.Errorf("importing snapshot: task failed") + } + time.Sleep(waitInterval) + } +} + +func getBackingSnapshotID(ctx context.Context, ec2C ec2API, amiID string) (string, error) { + describeResp, err := ec2C.DescribeImages(ctx, &ec2.DescribeImagesInput{ + ImageIds: []string{amiID}, + }) + if err != nil || len(describeResp.Images) == 0 { + return "", errAMIDoesNotExist + } + if len(describeResp.Images) != 1 { + return "", fmt.Errorf("describing image: expected 1 image, got %d", len(describeResp.Images)) + } + image := describeResp.Images[0] + if len(image.BlockDeviceMappings) != 1 { + return "", fmt.Errorf("found %d block device mappings for image %s, expected 1", len(image.BlockDeviceMappings), amiID) + } + if image.BlockDeviceMappings[0].Ebs == nil { + return "", fmt.Errorf("image %s does not have an EBS block device mapping", amiID) + } + ebs := image.BlockDeviceMappings[0].Ebs + if ebs.SnapshotId == nil { + return "", fmt.Errorf("image %s does not have an EBS snapshot", amiID) + } + return *ebs.SnapshotId, nil +} + +type ec2API interface { + DescribeImages(ctx context.Context, params *ec2.DescribeImagesInput, + optFns ...func(*ec2.Options), + ) (*ec2.DescribeImagesOutput, error) + ModifyImageAttribute(ctx context.Context, params *ec2.ModifyImageAttributeInput, + optFns ...func(*ec2.Options), + ) (*ec2.ModifyImageAttributeOutput, error) + RegisterImage(ctx context.Context, params *ec2.RegisterImageInput, + optFns ...func(*ec2.Options), + ) (*ec2.RegisterImageOutput, error) + CopyImage(ctx context.Context, params *ec2.CopyImageInput, optFns ...func(*ec2.Options), + ) (*ec2.CopyImageOutput, error) + DeregisterImage(ctx context.Context, params 
*ec2.DeregisterImageInput, + optFns ...func(*ec2.Options), + ) (*ec2.DeregisterImageOutput, error) + ImportSnapshot(ctx context.Context, params *ec2.ImportSnapshotInput, + optFns ...func(*ec2.Options), + ) (*ec2.ImportSnapshotOutput, error) + DescribeImportSnapshotTasks(ctx context.Context, params *ec2.DescribeImportSnapshotTasksInput, + optFns ...func(*ec2.Options), + ) (*ec2.DescribeImportSnapshotTasksOutput, error) + DescribeSnapshots(ctx context.Context, params *ec2.DescribeSnapshotsInput, + optFns ...func(*ec2.Options), + ) (*ec2.DescribeSnapshotsOutput, error) + DeleteSnapshot(ctx context.Context, params *ec2.DeleteSnapshotInput, optFns ...func(*ec2.Options), + ) (*ec2.DeleteSnapshotOutput, error) + CreateTags(ctx context.Context, params *ec2.CreateTagsInput, optFns ...func(*ec2.Options), + ) (*ec2.CreateTagsOutput, error) +} + +type s3API interface { + HeadBucket(ctx context.Context, params *s3.HeadBucketInput, optFns ...func(*s3.Options), + ) (*s3.HeadBucketOutput, error) + CreateBucket(ctx context.Context, params *s3.CreateBucketInput, optFns ...func(*s3.Options), + ) (*s3.CreateBucketOutput, error) + HeadObject(ctx context.Context, params *s3.HeadObjectInput, optFns ...func(*s3.Options), + ) (*s3.HeadObjectOutput, error) + DeleteObject(ctx context.Context, params *s3.DeleteObjectInput, optFns ...func(*s3.Options), + ) (*s3.DeleteObjectOutput, error) +} + +type s3UploaderAPI interface { + Upload(ctx context.Context, input *s3.PutObjectInput, opts ...func(*s3manager.Uploader), + ) (*s3manager.UploadOutput, error) +} + +func toPtr[T any](v T) *T { + return &v +} + +const ( + waitInterval = 15 * time.Second + maxWait = 30 * time.Minute + timestampFormat = "20060102150405" +) + +var ( + errAMIDoesNotExist = errors.New("ami does not exist") + replicationRegions = []string{"eu-west-1", "eu-west-3", "us-east-2", "ap-south-1"} +) diff --git a/internal/osimage/azure/azureupload.go b/internal/osimage/azure/azureupload.go new file mode 100644 index 0000000000..ac43fa6cb6 --- /dev/null +++ b/internal/osimage/azure/azureupload.go @@ -0,0 +1,710 @@ +/* +Copyright (c) Edgeless Systems GmbH + +SPDX-License-Identifier: AGPL-3.0-only +*/ + +// package azure implements uploading os images to azure. +package azure + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "log/slog" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + armcomputev5 "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob" + "github.com/edgelesssys/constellation/v2/internal/api/versionsapi" + "github.com/edgelesssys/constellation/v2/internal/osimage" +) + +// Uploader can upload and remove os images on Azure. +type Uploader struct { + subscription string + location string + resourceGroup string + pollingFrequency time.Duration + disks azureDiskAPI + managedImages azureManagedImageAPI + blob sasBlobUploader + galleries azureGalleriesAPI + image azureGalleriesImageAPI + imageVersions azureGalleriesImageVersionAPI + communityVersions azureCommunityGalleryImageVersionAPI + + log *slog.Logger +} + +// New creates a new Uploader. 
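+// All ARM clients are built from a single DefaultAzureCredential, so the
+// caller only needs ambient Azure credentials (environment variables,
+// workload identity, or an az CLI login); any client construction error
+// aborts construction of the Uploader as a whole. A sketch of a typical
+// call, with illustrative subscription, location, and resource group values:
+//
+//	up, err := azure.New("00000000-0000-0000-0000-000000000000", "northeurope", "constellation-images", log)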
+func New(subscription, location, resourceGroup string, log *slog.Logger) (*Uploader, error) { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + return nil, err + } + diskClient, err := armcomputev5.NewDisksClient(subscription, cred, nil) + if err != nil { + return nil, err + } + managedImagesClient, err := armcomputev5.NewImagesClient(subscription, cred, nil) + if err != nil { + return nil, err + } + galleriesClient, err := armcomputev5.NewGalleriesClient(subscription, cred, nil) + if err != nil { + return nil, err + } + galleriesImageClient, err := armcomputev5.NewGalleryImagesClient(subscription, cred, nil) + if err != nil { + return nil, err + } + galleriesImageVersionClient, err := armcomputev5.NewGalleryImageVersionsClient(subscription, cred, nil) + if err != nil { + return nil, err + } + communityImageVersionClient, err := armcomputev5.NewCommunityGalleryImageVersionsClient(subscription, cred, nil) + if err != nil { + return nil, err + } + + return &Uploader{ + subscription: subscription, + location: location, + resourceGroup: resourceGroup, + pollingFrequency: pollingFrequency, + disks: diskClient, + managedImages: managedImagesClient, + blob: func(sasBlobURL string) (azurePageblobAPI, error) { + return pageblob.NewClientWithNoCredential(sasBlobURL, nil) + }, + galleries: galleriesClient, + image: galleriesImageClient, + imageVersions: galleriesImageVersionClient, + communityVersions: communityImageVersionClient, + log: log, + }, nil +} + +// Upload uploads an OS image to Azure. +func (u *Uploader) Upload(ctx context.Context, req *osimage.UploadRequest) ([]versionsapi.ImageInfoEntry, error) { + formattedTime := req.Timestamp.Format(timestampFormat) + diskName := fmt.Sprintf("constellation-%s-%s-%s", req.Version.Stream(), formattedTime, req.AttestationVariant) + var sigName string + switch req.Version.Stream() { + case "stable": + sigName = sigNameStable + case "debug": + sigName = sigNameDebug + default: + sigName = sigNameDefault + } + definitionName := imageOffer(req.Version) + versionName, err := imageVersion(req.Version, req.Timestamp) + if err != nil { + return nil, fmt.Errorf("determining image version name: %w", err) + } + + // ensure new image can be uploaded by deleting existing resources using the same name + if err := u.ensureImageVersionDeleted(ctx, sigName, definitionName, versionName); err != nil { + return nil, fmt.Errorf("pre-cleaning: ensuring no image version using the same name exists: %w", err) + } + if err := u.ensureManagedImageDeleted(ctx, diskName); err != nil { + return nil, fmt.Errorf("pre-cleaning: ensuring no managed image using the same name exists: %w", err) + } + if err := u.ensureDiskDeleted(ctx, diskName); err != nil { + return nil, fmt.Errorf("pre-cleaning: ensuring no temporary disk using the same name exists: %w", err) + } + + diskID, err := u.createDisk(ctx, diskName, DiskTypeNormal, req.Image, nil, req.Size) + if err != nil { + return nil, fmt.Errorf("creating disk: %w", err) + } + defer func() { + // cleanup temp disk + err := u.ensureDiskDeleted(ctx, diskName) + if err != nil { + u.log.Error("post-cleaning: deleting disk image: %v", err) + } + }() + managedImageID, err := u.createManagedImage(ctx, diskName, diskID) + if err != nil { + return nil, fmt.Errorf("creating managed image: %w", err) + } + if err := u.ensureSIG(ctx, sigName); err != nil { + return nil, fmt.Errorf("ensuring sig exists: %w", err) + } + if err := u.ensureImageDefinition(ctx, sigName, definitionName, req.Version, req.AttestationVariant); err != 
nil { + return nil, fmt.Errorf("ensuring image definition exists: %w", err) + } + + unsharedImageVersionID, err := u.createImageVersion(ctx, sigName, definitionName, versionName, managedImageID) + if err != nil { + return nil, fmt.Errorf("creating image version: %w", err) + } + + imageReference, err := u.getImageReference(ctx, sigName, definitionName, versionName, unsharedImageVersionID) + if err != nil { + return nil, fmt.Errorf("getting image reference: %w", err) + } + + return []versionsapi.ImageInfoEntry{ + { + CSP: "azure", + AttestationVariant: req.AttestationVariant, + Reference: imageReference, + }, + }, nil +} + +// createDisk creates and initializes (uploads contents of) an azure disk. +func (u *Uploader) createDisk(ctx context.Context, diskName string, diskType DiskType, img io.ReadSeeker, vmgs io.ReadSeeker, size int64) (string, error) { + u.log.Debug("Creating disk %s in %s", diskName, u.resourceGroup) + if diskType == DiskTypeWithVMGS && vmgs == nil { + return "", errors.New("cannot create disk with vmgs: vmgs reader is nil") + } + + var createOption armcomputev5.DiskCreateOption + var requestVMGSSAS bool + switch diskType { + case DiskTypeNormal: + createOption = armcomputev5.DiskCreateOptionUpload + case DiskTypeWithVMGS: + createOption = armcomputev5.DiskCreateOptionUploadPreparedSecure + requestVMGSSAS = true + } + disk := armcomputev5.Disk{ + Location: &u.location, + Properties: &armcomputev5.DiskProperties{ + CreationData: &armcomputev5.CreationData{ + CreateOption: &createOption, + UploadSizeBytes: toPtr(size), + }, + HyperVGeneration: toPtr(armcomputev5.HyperVGenerationV2), + OSType: toPtr(armcomputev5.OperatingSystemTypesLinux), + }, + } + createPoller, err := u.disks.BeginCreateOrUpdate(ctx, u.resourceGroup, diskName, disk, &armcomputev5.DisksClientBeginCreateOrUpdateOptions{}) + if err != nil { + return "", fmt.Errorf("creating disk: %w", err) + } + createdDisk, err := createPoller.PollUntilDone(ctx, &runtime.PollUntilDoneOptions{Frequency: u.pollingFrequency}) + if err != nil { + return "", fmt.Errorf("waiting for disk to be created: %w", err) + } + + u.log.Debug("Granting temporary upload permissions via SAS token") + accessGrant := armcomputev5.GrantAccessData{ + Access: toPtr(armcomputev5.AccessLevelWrite), + DurationInSeconds: toPtr(int32(uploadAccessDuration)), + GetSecureVMGuestStateSAS: &requestVMGSSAS, + } + accessPoller, err := u.disks.BeginGrantAccess(ctx, u.resourceGroup, diskName, accessGrant, &armcomputev5.DisksClientBeginGrantAccessOptions{}) + if err != nil { + return "", fmt.Errorf("generating disk sas token: %w", err) + } + accesPollerResp, err := accessPoller.PollUntilDone(ctx, &runtime.PollUntilDoneOptions{Frequency: u.pollingFrequency}) + if err != nil { + return "", fmt.Errorf("waiting for sas token: %w", err) + } + + if requestVMGSSAS { + u.log.Debug("Uploading vmgs") + vmgsSize, err := vmgs.Seek(0, io.SeekEnd) + if err != nil { + return "", err + } + if _, err := vmgs.Seek(0, io.SeekStart); err != nil { + return "", err + } + if accesPollerResp.SecurityDataAccessSAS == nil { + return "", errors.New("uploading vmgs: grant access returned no vmgs sas") + } + if err := uploadBlob(ctx, *accesPollerResp.SecurityDataAccessSAS, vmgs, vmgsSize, u.blob); err != nil { + return "", fmt.Errorf("uploading vmgs: %w", err) + } + } + u.log.Debug("Uploading os image") + if accesPollerResp.AccessSAS == nil { + return "", errors.New("uploading disk: grant access returned no disk sas") + } + if err := uploadBlob(ctx, *accesPollerResp.AccessSAS, img, size, 
u.blob); err != nil { + return "", fmt.Errorf("uploading image: %w", err) + } + revokePoller, err := u.disks.BeginRevokeAccess(ctx, u.resourceGroup, diskName, &armcomputev5.DisksClientBeginRevokeAccessOptions{}) + if err != nil { + return "", fmt.Errorf("revoking disk sas token: %w", err) + } + if _, err := revokePoller.PollUntilDone(ctx, &runtime.PollUntilDoneOptions{Frequency: u.pollingFrequency}); err != nil { + return "", fmt.Errorf("waiting for sas token revocation: %w", err) + } + if createdDisk.ID == nil { + return "", errors.New("created disk has no id") + } + return *createdDisk.ID, nil +} + +func (u *Uploader) ensureDiskDeleted(ctx context.Context, diskName string) error { + _, err := u.disks.Get(ctx, u.resourceGroup, diskName, &armcomputev5.DisksClientGetOptions{}) + if err != nil { + u.log.Debug("Disk %s in %s doesn't exist. Nothing to clean up.", diskName, u.resourceGroup) + return nil + } + u.log.Debug("Deleting disk %s in %s", diskName, u.resourceGroup) + deletePoller, err := u.disks.BeginDelete(ctx, u.resourceGroup, diskName, &armcomputev5.DisksClientBeginDeleteOptions{}) + if err != nil { + return fmt.Errorf("deleting disk: %w", err) + } + if _, err = deletePoller.PollUntilDone(ctx, &runtime.PollUntilDoneOptions{Frequency: u.pollingFrequency}); err != nil { + return fmt.Errorf("waiting for disk to be deleted: %w", err) + } + return nil +} + +func (u *Uploader) createManagedImage(ctx context.Context, imageName string, diskID string) (string, error) { + u.log.Debug("Creating managed image %s in %s", imageName, u.resourceGroup) + image := armcomputev5.Image{ + Location: &u.location, + Properties: &armcomputev5.ImageProperties{ + HyperVGeneration: toPtr(armcomputev5.HyperVGenerationTypesV2), + StorageProfile: &armcomputev5.ImageStorageProfile{ + OSDisk: &armcomputev5.ImageOSDisk{ + OSState: toPtr(armcomputev5.OperatingSystemStateTypesGeneralized), + OSType: toPtr(armcomputev5.OperatingSystemTypesLinux), + ManagedDisk: &armcomputev5.SubResource{ + ID: &diskID, + }, + }, + }, + }, + } + createPoller, err := u.managedImages.BeginCreateOrUpdate( + ctx, u.resourceGroup, imageName, image, + &armcomputev5.ImagesClientBeginCreateOrUpdateOptions{}, + ) + if err != nil { + return "", fmt.Errorf("creating managed image: %w", err) + } + createdImage, err := createPoller.PollUntilDone(ctx, &runtime.PollUntilDoneOptions{Frequency: u.pollingFrequency}) + if err != nil { + return "", fmt.Errorf("waiting for image to be created: %w", err) + } + if createdImage.ID == nil { + return "", errors.New("created image has no id") + } + return *createdImage.ID, nil +} + +func (u *Uploader) ensureManagedImageDeleted(ctx context.Context, imageName string) error { + _, err := u.managedImages.Get(ctx, u.resourceGroup, imageName, &armcomputev5.ImagesClientGetOptions{}) + if err != nil { + u.log.Debug("Managed image %s in %s doesn't exist. Nothing to clean up.", imageName, u.resourceGroup) + return nil + } + u.log.Debug("Deleting managed image %s in %s", imageName, u.resourceGroup) + deletePoller, err := u.managedImages.BeginDelete(ctx, u.resourceGroup, imageName, &armcomputev5.ImagesClientBeginDeleteOptions{}) + if err != nil { + return fmt.Errorf("deleting image: %w", err) + } + if _, err = deletePoller.PollUntilDone(ctx, &runtime.PollUntilDoneOptions{Frequency: u.pollingFrequency}); err != nil { + return fmt.Errorf("waiting for image to be deleted: %w", err) + } + return nil +} + +// ensureSIG creates a SIG if it does not exist yet. 
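+// The probe is a plain get-then-create: any error from Get is taken to mean
+// "the gallery does not exist", which also swallows transient API failures.
+// A stricter probe would inspect the HTTP status code, e.g. (sketch only,
+// assuming azcore and net/http imports; not part of this patch):
+//
+//	var respErr *azcore.ResponseError
+//	if err != nil && !(errors.As(err, &respErr) && respErr.StatusCode == http.StatusNotFound) {
+//		return fmt.Errorf("probing for gallery %s: %w", sigName, err)
+//	}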
+func (u *Uploader) ensureSIG(ctx context.Context, sigName string) error { + _, err := u.galleries.Get(ctx, u.resourceGroup, sigName, &armcomputev5.GalleriesClientGetOptions{}) + if err == nil { + u.log.Debug("Image gallery %s in %s exists", sigName, u.resourceGroup) + return nil + } + u.log.Debug("Creating image gallery %s in %s", sigName, u.resourceGroup) + gallery := armcomputev5.Gallery{ + Location: &u.location, + } + createPoller, err := u.galleries.BeginCreateOrUpdate(ctx, u.resourceGroup, sigName, gallery, + &armcomputev5.GalleriesClientBeginCreateOrUpdateOptions{}, + ) + if err != nil { + return fmt.Errorf("creating image gallery: %w", err) + } + if _, err = createPoller.PollUntilDone(ctx, &runtime.PollUntilDoneOptions{Frequency: u.pollingFrequency}); err != nil { + return fmt.Errorf("waiting for image gallery to be created: %w", err) + } + return nil +} + +// ensureImageDefinition creates an image definition (component of a SIG) if it does not exist yet. +func (u *Uploader) ensureImageDefinition(ctx context.Context, sigName, definitionName string, version versionsapi.Version, attestationVariant string) error { + _, err := u.image.Get(ctx, u.resourceGroup, sigName, definitionName, &armcomputev5.GalleryImagesClientGetOptions{}) + if err == nil { + u.log.Debug("Image definition %s/%s in %s exists", sigName, definitionName, u.resourceGroup) + return nil + } + u.log.Debug("Creating image definition %s/%s in %s", sigName, definitionName, u.resourceGroup) + var securityType string + // TODO(malt3): This needs to allow the *Supported or the normal variant + // based on wether a VMGS was provided or not. + // VMGS provided: ConfidentialVM + // No VMGS provided: ConfidentialVMSupported + switch strings.ToLower(attestationVariant) { + case "azure-sev-snp": + securityType = string("ConfidentialVMSupported") + case "azure-trustedlaunch": + securityType = string(armcomputev5.SecurityTypesTrustedLaunch) + } + offer := imageOffer(version) + + galleryImage := armcomputev5.GalleryImage{ + Location: &u.location, + Properties: &armcomputev5.GalleryImageProperties{ + Identifier: &armcomputev5.GalleryImageIdentifier{ + Offer: &offer, + Publisher: toPtr(imageDefinitionPublisher), + SKU: toPtr(imageDefinitionSKU), + }, + OSState: toPtr(armcomputev5.OperatingSystemStateTypesGeneralized), + OSType: toPtr(armcomputev5.OperatingSystemTypesLinux), + Architecture: toPtr(armcomputev5.ArchitectureX64), + Features: []*armcomputev5.GalleryImageFeature{ + { + Name: toPtr("SecurityType"), + Value: &securityType, + }, + }, + HyperVGeneration: toPtr(armcomputev5.HyperVGenerationV2), + }, + } + createPoller, err := u.image.BeginCreateOrUpdate(ctx, u.resourceGroup, sigName, definitionName, galleryImage, + &armcomputev5.GalleryImagesClientBeginCreateOrUpdateOptions{}, + ) + if err != nil { + return fmt.Errorf("creating image definition: %w", err) + } + if _, err = createPoller.PollUntilDone(ctx, &runtime.PollUntilDoneOptions{Frequency: u.pollingFrequency}); err != nil { + return fmt.Errorf("waiting for image definition to be created: %w", err) + } + return nil +} + +func (u *Uploader) createImageVersion(ctx context.Context, sigName, definitionName, versionName, imageID string) (string, error) { + u.log.Debug("Creating image version %s/%s/%s in %s", sigName, definitionName, versionName, u.resourceGroup) + imageVersion := armcomputev5.GalleryImageVersion{ + Location: &u.location, + Properties: &armcomputev5.GalleryImageVersionProperties{ + StorageProfile: &armcomputev5.GalleryImageVersionStorageProfile{ + OSDiskImage: 
&armcomputev5.GalleryOSDiskImage{ + HostCaching: toPtr(armcomputev5.HostCachingReadOnly), + }, + Source: &armcomputev5.GalleryArtifactVersionFullSource{ + ID: &imageID, + }, + }, + PublishingProfile: &armcomputev5.GalleryImageVersionPublishingProfile{ + ReplicaCount: toPtr[int32](1), + ReplicationMode: toPtr(armcomputev5.ReplicationModeFull), + TargetRegions: targetRegions, + }, + }, + } + createPoller, err := u.imageVersions.BeginCreateOrUpdate(ctx, u.resourceGroup, sigName, definitionName, versionName, imageVersion, + &armcomputev5.GalleryImageVersionsClientBeginCreateOrUpdateOptions{}, + ) + if err != nil { + return "", fmt.Errorf("creating image version: %w", err) + } + createdImage, err := createPoller.PollUntilDone(ctx, &runtime.PollUntilDoneOptions{Frequency: u.pollingFrequency}) + if err != nil { + return "", fmt.Errorf("waiting for image version to be created: %w", err) + } + if createdImage.ID == nil { + return "", errors.New("created image has no id") + } + return *createdImage.ID, nil +} + +func (u *Uploader) ensureImageVersionDeleted(ctx context.Context, sigName, definitionName, versionName string) error { + _, err := u.imageVersions.Get(ctx, u.resourceGroup, sigName, definitionName, versionName, &armcomputev5.GalleryImageVersionsClientGetOptions{}) + if err != nil { + u.log.Debug("Image version %s in %s/%s/%s doesn't exist. Nothing to clean up.", versionName, u.resourceGroup, sigName, definitionName) + return nil + } + u.log.Debug("Deleting image version %s in %s/%s/%s", versionName, u.resourceGroup, sigName, definitionName) + deletePoller, err := u.imageVersions.BeginDelete(ctx, u.resourceGroup, sigName, definitionName, versionName, &armcomputev5.GalleryImageVersionsClientBeginDeleteOptions{}) + if err != nil { + return fmt.Errorf("deleting image version: %w", err) + } + if _, err = deletePoller.PollUntilDone(ctx, &runtime.PollUntilDoneOptions{Frequency: u.pollingFrequency}); err != nil { + return fmt.Errorf("waiting for image version to be deleted: %w", err) + } + return nil +} + +// getImageReference returns the image reference to use for the image version. +// If the shared image gallery is a community gallery, the community identifier is returned. +// Otherwise, the unshared identifier is returned. +func (u *Uploader) getImageReference(ctx context.Context, sigName, definitionName, versionName, unsharedID string) (string, error) { + galleryResp, err := u.galleries.Get(ctx, u.resourceGroup, sigName, &armcomputev5.GalleriesClientGetOptions{}) + if err != nil { + return "", fmt.Errorf("getting image gallery %s: %w", sigName, err) + } + if galleryResp.Properties == nil || + galleryResp.Properties.SharingProfile == nil || + galleryResp.Properties.SharingProfile.CommunityGalleryInfo == nil || + galleryResp.Properties.SharingProfile.CommunityGalleryInfo.CommunityGalleryEnabled == nil || + !*galleryResp.Properties.SharingProfile.CommunityGalleryInfo.CommunityGalleryEnabled { + u.log.Warn("Image gallery %s in %s is not shared. 
Using private identifier", sigName, u.resourceGroup)
+		return unsharedID, nil
+	}
+	if galleryResp.Properties == nil ||
+		galleryResp.Properties.SharingProfile == nil ||
+		galleryResp.Properties.SharingProfile.CommunityGalleryInfo == nil ||
+		galleryResp.Properties.SharingProfile.CommunityGalleryInfo.PublicNames == nil ||
+		len(galleryResp.Properties.SharingProfile.CommunityGalleryInfo.PublicNames) < 1 ||
+		galleryResp.Properties.SharingProfile.CommunityGalleryInfo.PublicNames[0] == nil {
+		return "", fmt.Errorf("image gallery %s in %s is a community gallery but has no public names", sigName, u.resourceGroup)
+	}
+	communityGalleryName := *galleryResp.Properties.SharingProfile.CommunityGalleryInfo.PublicNames[0]
+	u.log.Debug("Image gallery %s in %s is shared. Using community identifier in %s", sigName, u.resourceGroup, communityGalleryName)
+	communityVersionResp, err := u.communityVersions.Get(ctx, u.location, communityGalleryName,
+		definitionName, versionName,
+		&armcomputev5.CommunityGalleryImageVersionsClientGetOptions{},
+	)
+	if err != nil {
+		return "", fmt.Errorf("getting community image version %s/%s/%s: %w", communityGalleryName, definitionName, versionName, err)
+	}
+	if communityVersionResp.Identifier == nil || communityVersionResp.Identifier.UniqueID == nil {
+		return "", fmt.Errorf("community image version %s/%s/%s has no id", communityGalleryName, definitionName, versionName)
+	}
+	return *communityVersionResp.Identifier.UniqueID, nil
+}
+
+func uploadBlob(ctx context.Context, sasURL string, disk io.ReadSeeker, size int64, uploader sasBlobUploader) error {
+	uploadClient, err := uploader(sasURL)
+	if err != nil {
+		return fmt.Errorf("uploading blob: %w", err)
+	}
+	var offset int64
+	var chunksize int
+	chunk := make([]byte, pageSizeMax)
+	var readErr error
+	for offset < size {
+		chunksize, readErr = io.ReadAtLeast(disk, chunk, 1)
+		if readErr != nil {
+			// wrap the read error, not the (nil) client construction error
+			return fmt.Errorf("reading from disk: %w", readErr)
+		}
+		if err := uploadChunk(ctx, uploadClient, bytes.NewReader(chunk[:chunksize]), offset, int64(chunksize)); err != nil {
+			return fmt.Errorf("uploading chunk: %w", err)
+		}
+		offset += int64(chunksize)
+	}
+	return nil
+}
+
+func uploadChunk(ctx context.Context, uploader azurePageblobAPI, chunk io.ReadSeeker, offset, chunksize int64) error {
+	_, err := uploader.UploadPages(ctx, &readSeekNopCloser{chunk}, blob.HTTPRange{
+		Offset: offset,
+		Count:  chunksize,
+	}, nil)
+	return err
+}
+
+func imageOffer(version versionsapi.Version) string {
+	switch {
+	case version.Stream() == "stable":
+		return "constellation"
+	case version.Stream() == "debug" && version.Ref() == "-":
+		return version.Version()
+	}
+	return version.Ref() + "-" + version.Stream()
+}
+
+// imageVersion determines the semantic version string used inside a sig image.
+// For releases, the actual semantic version of the image (without leading v) is used (major.minor.patch).
+// Otherwise, the version is derived from the commit timestamp.
+func imageVersion(version versionsapi.Version, timestamp time.Time) (string, error) {
+	switch {
+	case version.Stream() == "stable":
+		fallthrough
+	case version.Stream() == "debug" && version.Ref() == "-":
+		return strings.TrimLeft(version.Version(), "v"), nil
+	}
+
+	formattedTime := timestamp.Format(timestampFormat)
+	if len(formattedTime) != len(timestampFormat) {
+		return "", errors.New("invalid timestamp")
+	}
+	// ..