From 54cce77bab62603bff69bbe77dbd76b939023d65 Mon Sep 17 00:00:00 2001
From: miampf
Date: Thu, 8 Feb 2024 14:20:01 +0000
Subject: [PATCH] deps: convert zap to slog (#2825)

---
 bootstrapper/cmd/bootstrapper/BUILD.bazel | 1 -
 bootstrapper/cmd/bootstrapper/main.go | 33 ++-
 bootstrapper/cmd/bootstrapper/run.go | 27 +-
 bootstrapper/cmd/bootstrapper/test.go | 6 +-
 bootstrapper/internal/initserver/BUILD.bazel | 1 -
 .../internal/initserver/initserver.go | 26 +-
 .../internal/initserver/initserver_test.go | 3 +-
 bootstrapper/internal/joinclient/BUILD.bazel | 2 -
 .../internal/joinclient/joinclient.go | 49 ++--
 .../internal/joinclient/joinclient_test.go | 3 +-
 bootstrapper/internal/kubernetes/BUILD.bazel | 2 -
 .../internal/kubernetes/k8sapi/BUILD.bazel | 2 -
 .../internal/kubernetes/k8sapi/k8sutil.go | 27 +-
 bootstrapper/internal/kubernetes/k8sutil.go | 6 +-
 .../internal/kubernetes/kubernetes.go | 49 ++--
 .../internal/kubernetes/kubernetes_test.go | 5 +-
 cli/internal/cmd/BUILD.bazel | 1 -
 cli/internal/cmd/apply.go | 55 ++---
 cli/internal/cmd/applyhelm.go | 14 +-
 cli/internal/cmd/applyinit.go | 12 +-
 cli/internal/cmd/applyterraform.go | 16 +-
 cli/internal/cmd/configfetchmeasurements.go | 15 +-
 cli/internal/cmd/configgenerate.go | 7 +-
 cli/internal/cmd/iamcreate.go | 7 +-
 cli/internal/cmd/iamdestroy.go | 21 +-
 cli/internal/cmd/iamupgradeapply.go | 2 +-
 cli/internal/cmd/init.go | 6 +-
 cli/internal/cmd/license_enterprise.go | 4 +-
 cli/internal/cmd/log.go | 12 +-
 cli/internal/cmd/maapatch.go | 3 +-
 cli/internal/cmd/miniup.go | 3 +-
 cli/internal/cmd/miniup_linux_amd64.go | 12 +-
 cli/internal/cmd/recover.go | 29 ++-
 cli/internal/cmd/status.go | 1 -
 cli/internal/cmd/upgradecheck.go | 43 ++--
 cli/internal/cmd/verify.go | 17 +-
 debugd/cmd/debugd/BUILD.bazel | 1 -
 debugd/cmd/debugd/debugd.go | 36 +--
 debugd/internal/cdbg/cmd/deploy.go | 39 +--
 debugd/internal/debugd/deploy/BUILD.bazel | 2 -
 debugd/internal/debugd/deploy/download.go | 31 ++-
 debugd/internal/debugd/deploy/service.go | 29 ++-
 .../internal/debugd/logcollector/BUILD.bazel | 1 -
 .../debugd/logcollector/logcollector.go | 66 ++---
 debugd/internal/debugd/metadata/BUILD.bazel | 6 +-
 debugd/internal/debugd/metadata/scheduler.go | 23 +-
 debugd/internal/debugd/server/BUILD.bazel | 1 -
 debugd/internal/debugd/server/server.go | 50 ++--
 debugd/internal/filetransfer/BUILD.bazel | 2 -
 debugd/internal/filetransfer/filetransfer.go | 14 +-
 dev-docs/conventions.md | 46 ++--
 disk-mapper/cmd/BUILD.bazel | 1 -
 disk-mapper/cmd/main.go | 59 +++--
 .../internal/diskencryption/BUILD.bazel | 6 +-
 .../internal/diskencryption/diskencryption.go | 11 +-
 .../internal/recoveryserver/BUILD.bazel | 1 -
 .../internal/recoveryserver/recoveryserver.go | 24 +-
 disk-mapper/internal/rejoinclient/BUILD.bazel | 2 -
 .../internal/rejoinclient/rejoinclient.go | 23 +-
 disk-mapper/internal/setup/BUILD.bazel | 2 -
 disk-mapper/internal/setup/setup.go | 19 +-
 disk-mapper/internal/test/BUILD.bazel | 2 -
 disk-mapper/internal/test/benchmark_test.go | 4 +-
 disk-mapper/internal/test/integration_test.go | 3 +-
 e2e/internal/upgrade/helm.go | 2 +-
 e2e/malicious-join/BUILD.bazel | 2 -
 e2e/malicious-join/malicious-join.go | 46 ++--
 go.mod | 3 +-
 hack/bazel-deps-mirror/BUILD.bazel | 1 -
 hack/bazel-deps-mirror/check.go | 33 +--
 hack/bazel-deps-mirror/fix.go | 51 ++--
 .../internal/mirror/BUILD.bazel | 1 -
 .../internal/mirror/mirror.go | 36 +--
 hack/bazel-deps-mirror/upgrade.go | 47 ++--
 hack/cli-k8s-compatibility/BUILD.bazel | 1 -
 hack/cli-k8s-compatibility/main.go | 24 +-
 hack/oci-pin/BUILD.bazel | 1 -
 hack/oci-pin/codegen.go | 18 +-
 hack/oci-pin/merge.go | 16 +-
 hack/oci-pin/sum.go | 18 +-
 hack/qemu-metadata-api/BUILD.bazel | 2 -
 hack/qemu-metadata-api/main.go | 12 +-
 hack/qemu-metadata-api/server/BUILD.bazel | 2 -
 hack/qemu-metadata-api/server/server.go | 51 ++--
 image/upload/internal/cmd/BUILD.bazel | 1 -
 image/upload/internal/cmd/flags.go | 26 +-
 image/upload/internal/cmd/info.go | 8 +-
 .../internal/cmd/measurementsenvelope.go | 6 +-
 .../upload/internal/cmd/measurementsmerge.go | 6 +-
 .../upload/internal/cmd/measurementsupload.go | 8 +-
 image/upload/internal/cmd/uplosi.go | 6 +-
 internal/api/attestationconfigapi/BUILD.bazel | 1 -
 .../api/attestationconfigapi/cli/BUILD.bazel | 1 -
 .../api/attestationconfigapi/cli/delete.go | 6 +-
 .../api/attestationconfigapi/cli/upload.go | 14 +-
 internal/api/attestationconfigapi/client.go | 4 +-
 internal/api/attestationconfigapi/reporter.go | 10 +-
 internal/api/client/BUILD.bazel | 2 -
 internal/api/client/client.go | 29 ++-
 internal/api/versionsapi/BUILD.bazel | 1 -
 internal/api/versionsapi/cli/BUILD.bazel | 1 -
 internal/api/versionsapi/cli/add.go | 52 ++--
 internal/api/versionsapi/cli/latest.go | 18 +-
 internal/api/versionsapi/cli/list.go | 28 +--
 internal/api/versionsapi/cli/rm.go | 124 +++++-----
 internal/api/versionsapi/client.go | 44 ++--
 internal/attestation/attestation.go | 12 +-
 internal/attestation/azure/snp/validator.go | 12 +-
 internal/attestation/snp/snp.go | 22 +-
 internal/attestation/tdx/issuer.go | 4 +-
 internal/attestation/tdx/validator.go | 6 +-
 internal/attestation/vtpm/attestation.go | 14 +-
 internal/attestation/vtpm/attestation_test.go | 4 +-
 internal/cloud/azure/BUILD.bazel | 2 -
 internal/cloud/azure/azure.go | 11 +-
 internal/constellation/apply.go | 14 +-
 internal/constellation/applyinit.go | 28 +--
 internal/constellation/helm/action.go | 2 +-
 internal/constellation/helm/actionfactory.go | 10 +-
 internal/constellation/helm/helm.go | 8 +-
 internal/constellation/helm/retryaction.go | 6 +-
 internal/constellation/kubecmd/backup.go | 18 +-
 internal/constellation/kubecmd/backup_test.go | 3 +-
 internal/constellation/kubecmd/kubecmd.go | 28 +--
 internal/grpc/grpclog/grpclog.go | 11 +-
 internal/grpc/grpclog/grpclog_test.go | 5 +-
 internal/logger/BUILD.bazel | 4 +-
 internal/logger/cmdline.go | 19 +-
 internal/logger/grpclogger.go | 53 ++--
 internal/logger/levelhandler.go | 57 +++++
 internal/logger/log.go | 233 +++++-------------
 internal/osimage/archive/BUILD.bazel | 1 -
 internal/osimage/archive/archive.go | 9 +-
 internal/osimage/imageinfo/BUILD.bazel | 1 -
 internal/osimage/imageinfo/imageinfo.go | 8 +-
 .../osimage/measurementsuploader/BUILD.bazel | 1 -
 .../measurementsuploader.go | 8 +-
 internal/osimage/nop/BUILD.bazel | 1 -
 internal/osimage/nop/nop.go | 9 +-
 internal/osimage/uplosi/BUILD.bazel | 1 -
 internal/osimage/uplosi/uplosiupload.go | 6 +-
 internal/staticupload/BUILD.bazel | 1 -
 internal/staticupload/staticupload.go | 14 +-
 internal/verify/verify.go | 6 +-
 joinservice/cmd/BUILD.bazel | 1 -
 joinservice/cmd/main.go | 62 +++--
 joinservice/internal/certcache/BUILD.bazel | 1 -
 .../internal/certcache/amdkds/amdkds_test.go | 6 +-
 joinservice/internal/certcache/certcache.go | 28 +--
 joinservice/internal/kms/BUILD.bazel | 2 -
 joinservice/internal/kms/kms.go | 15 +-
 joinservice/internal/kubeadm/BUILD.bazel | 1 -
 joinservice/internal/kubeadm/kubeadm.go | 16 +-
 joinservice/internal/kubernetesca/BUILD.bazel | 1 -
 .../internal/kubernetesca/kubernetesca.go | 12 +-
 joinservice/internal/server/BUILD.bazel | 1 -
 joinservice/internal/server/server.go | 64 ++---
 joinservice/internal/watcher/BUILD.bazel | 2 -
 joinservice/internal/watcher/validator.go | 10 +-
 joinservice/internal/watcher/watcher.go | 15 +-
 keyservice/cmd/BUILD.bazel | 1 -
 keyservice/cmd/main.go | 30 ++-
 keyservice/internal/server/BUILD.bazel | 2 -
 keyservice/internal/server/server.go | 19 +-
 measurement-reader/cmd/BUILD.bazel | 2 -
 measurement-reader/cmd/main.go | 17 +-
 s3proxy/cmd/BUILD.bazel | 1 -
 s3proxy/cmd/main.go | 14 +-
 s3proxy/internal/kms/BUILD.bazel | 1 -
 s3proxy/internal/kms/kms.go | 12 +-
 s3proxy/internal/router/BUILD.bazel | 2 -
 s3proxy/internal/router/handler.go | 57 +++--
 s3proxy/internal/router/object.go | 23 +-
 s3proxy/internal/router/router.go | 6 +-
 .../internal/provider/cluster_resource.go | 12 +-
 upgrade-agent/cmd/BUILD.bazel | 1 -
 upgrade-agent/cmd/main.go | 16 +-
 upgrade-agent/internal/server/server.go | 19 +-
 verify/cmd/BUILD.bazel | 1 -
 verify/cmd/main.go | 32 ++-
 verify/server/BUILD.bazel | 2 -
 verify/server/server.go | 35 ++-
 182 files changed, 1474 insertions(+), 1509 deletions(-)
 create mode 100644 internal/logger/levelhandler.go

diff --git a/bootstrapper/cmd/bootstrapper/BUILD.bazel b/bootstrapper/cmd/bootstrapper/BUILD.bazel
index da8d4d9c22..6a8c61c503 100644
--- a/bootstrapper/cmd/bootstrapper/BUILD.bazel
+++ b/bootstrapper/cmd/bootstrapper/BUILD.bazel
@@ -44,7 +44,6 @@ go_library(
         "//internal/versions/components",
         "@com_github_spf13_afero//:afero",
         "@io_k8s_kubernetes//cmd/kubeadm/app/apis/kubeadm/v1beta3",
-        "@org_uber_go_zap//:zap",
     ],
 )

diff --git a/bootstrapper/cmd/bootstrapper/main.go b/bootstrapper/cmd/bootstrapper/main.go
index efb010ea14..ff99b231ff 100644
--- a/bootstrapper/cmd/bootstrapper/main.go
+++ b/bootstrapper/cmd/bootstrapper/main.go
@@ -9,12 +9,13 @@ package main
 import (
 	"context"
 	"flag"
+	"fmt"
 	"io"
+	"log/slog"
 	"os"
 	"strconv"

 	"github.com/spf13/afero"
-	"go.uber.org/zap"

 	"github.com/edgelesssys/constellation/v2/bootstrapper/internal/kubernetes"
 	"github.com/edgelesssys/constellation/v2/bootstrapper/internal/kubernetes/k8sapi"
@@ -45,13 +46,12 @@ func main() {
 	gRPCDebug := flag.Bool("debug", false, "Enable gRPC debug logging")
 	verbosity := flag.Int("v", 0, logger.CmdLineVerbosityDescription)
 	flag.Parse()
-	log := logger.New(logger.JSONLog, logger.VerbosityFromInt(*verbosity)).Named("bootstrapper")
-	defer log.Sync()
+	log := logger.NewJSONLogger(logger.VerbosityFromInt(*verbosity)).WithGroup("bootstrapper")

 	if *gRPCDebug {
-		log.Named("gRPC").ReplaceGRPCLogger()
+		logger.ReplaceGRPCLogger(log.WithGroup("gRPC"))
 	} else {
-		log.Named("gRPC").WithIncreasedLevel(zap.WarnLevel).ReplaceGRPCLogger()
+		logger.ReplaceGRPCLogger(slog.New(logger.NewLevelHandler(slog.LevelWarn, log.Handler())).WithGroup("gRPC"))
 	}

 	ctx, cancel := context.WithCancel(context.Background())
@@ -66,18 +66,21 @@ func main() {

 	attestVariant, err := variant.FromString(os.Getenv(constants.AttestationVariant))
 	if err != nil {
-		log.With(zap.Error(err)).Fatalf("Failed to parse attestation variant")
+		log.With(slog.Any("error", err)).Error("Failed to parse attestation variant")
+		os.Exit(1)
 	}
 	issuer, err := choose.Issuer(attestVariant, log)
 	if err != nil {
-		log.With(zap.Error(err)).Fatalf("Failed to select issuer")
+		log.With(slog.Any("error", err)).Error("Failed to select issuer")
+		os.Exit(1)
 	}

 	switch cloudprovider.FromString(os.Getenv(constellationCSP)) {
 	case cloudprovider.AWS:
 		metadata, err := awscloud.New(ctx)
 		if err != nil {
-			log.With(zap.Error(err)).Fatalf("Failed to set up AWS metadata API")
+			log.With(slog.Any("error", err)).Error("Failed to set up AWS metadata API")
+			os.Exit(1)
 		}
 		metadataAPI = metadata
@@ -91,7 +94,8 @@ func main() {
 	case cloudprovider.GCP:
 		metadata, err := gcpcloud.New(ctx)
 		if err != nil {
-			log.With(zap.Error(err)).Fatalf("Failed to create GCP metadata client")
+			log.With(slog.Any("error", err)).Error("Failed to create GCP metadata client")
+			os.Exit(1)
 		}
 		defer metadata.Close()
@@ -106,11 +110,13 @@ func main() {
 	case cloudprovider.Azure:
 		metadata, err := azurecloud.New(ctx)
 		if err != nil {
-			log.With(zap.Error(err)).Fatalf("Failed to create Azure metadata client")
+			log.With(slog.Any("error", err)).Error("Failed to create Azure metadata client")
+			os.Exit(1)
 		}

 		if err := metadata.PrepareControlPlaneNode(ctx, log); err != nil {
-			log.With(zap.Error(err)).Fatalf("Failed to prepare Azure control plane node")
+			log.With(slog.Any("error", err)).Error("Failed to prepare Azure control plane node")
+			os.Exit(1)
 		}

 		metadataAPI = metadata
@@ -138,13 +144,14 @@ func main() {
 				return tdx.Open()
 			}
 		default:
-			log.Fatalf("Unsupported attestation variant: %s", attestVariant)
+			log.Error(fmt.Sprintf("Unsupported attestation variant: %s", attestVariant))
 		}
 		fs = afero.NewOsFs()
 	case cloudprovider.OpenStack:
 		metadata, err := openstackcloud.New(ctx)
 		if err != nil {
-			log.With(zap.Error(err)).Fatalf("Failed to create OpenStack metadata client")
+			log.With(slog.Any("error", err)).Error("Failed to create OpenStack metadata client")
+			os.Exit(1)
 		}
 		clusterInitJoiner = kubernetes.New(
 			"openstack", k8sapi.NewKubernetesUtil(), &k8sapi.KubdeadmConfiguration{}, kubectl.NewUninitialized(),
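The main.go hunk above swaps zap's WithIncreasedLevel(zap.WarnLevel) for logger.NewLevelHandler(slog.LevelWarn, ...), whose implementation lands in the new internal/logger/levelhandler.go. A minimal sketch of such a level-raising slog.Handler wrapper (names and details assumed here, not copied from the new file):

	package logger

	import (
		"context"
		"log/slog"
	)

	// levelHandler wraps another slog.Handler and drops all records below its
	// minimum level. Hypothetical sketch; the real levelhandler.go may differ.
	type levelHandler struct {
		level   slog.Leveler
		handler slog.Handler
	}

	// NewLevelHandler returns a handler that only passes records at or above level.
	func NewLevelHandler(level slog.Leveler, h slog.Handler) slog.Handler {
		return &levelHandler{level: level, handler: h}
	}

	// Enabled reports whether the wrapper's minimum level admits this record.
	func (h *levelHandler) Enabled(_ context.Context, level slog.Level) bool {
		return level >= h.level.Level()
	}

	// Handle forwards records that passed the level check to the inner handler.
	func (h *levelHandler) Handle(ctx context.Context, r slog.Record) error {
		return h.handler.Handle(ctx, r)
	}

	func (h *levelHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
		return &levelHandler{level: h.level, handler: h.handler.WithAttrs(attrs)}
	}

	func (h *levelHandler) WithGroup(name string) slog.Handler {
		return &levelHandler{level: h.level, handler: h.handler.WithGroup(name)}
	}

Wrapping the handler rather than the logger is what lets the call site above reuse log.Handler() while only raising the threshold for the gRPC group.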
diff --git a/bootstrapper/cmd/bootstrapper/run.go b/bootstrapper/cmd/bootstrapper/run.go
index 41630b6d51..733444beec 100644
--- a/bootstrapper/cmd/bootstrapper/run.go
+++ b/bootstrapper/cmd/bootstrapper/run.go
@@ -8,7 +8,10 @@ package main
 import (
 	"context"
+	"fmt"
+	"log/slog"
 	"net"
+	"os"

 	"github.com/edgelesssys/constellation/v2/bootstrapper/internal/clean"
 	"github.com/edgelesssys/constellation/v2/bootstrapper/internal/diskencryption"
@@ -21,31 +24,31 @@ import (
 	"github.com/edgelesssys/constellation/v2/internal/constants"
 	"github.com/edgelesssys/constellation/v2/internal/file"
 	"github.com/edgelesssys/constellation/v2/internal/grpc/dialer"
-	"github.com/edgelesssys/constellation/v2/internal/logger"
-	"go.uber.org/zap"
 )

 func run(issuer atls.Issuer, openDevice vtpm.TPMOpenFunc, fileHandler file.Handler,
 	kube clusterInitJoiner, metadata metadataAPI,
-	bindIP, bindPort string, log *logger.Logger,
+	bindIP, bindPort string, log *slog.Logger,
 ) {
-	log.With(zap.String("version", constants.BinaryVersion().String())).Infof("Starting bootstrapper")
+	log.With(slog.String("version", constants.BinaryVersion().String())).Info("Starting bootstrapper")

 	uuid, err := getDiskUUID()
 	if err != nil {
-		log.With(zap.Error(err)).Errorf("Failed to get disk UUID")
+		log.With(slog.Any("error", err)).Error("Failed to get disk UUID")
 	} else {
-		log.Infof("Disk UUID: %s", uuid)
+		log.Info(fmt.Sprintf("Disk UUID: %s", uuid))
 	}

 	nodeBootstrapped, err := initialize.IsNodeBootstrapped(openDevice)
 	if err != nil {
-		log.With(zap.Error(err)).Fatalf("Failed to check if node was previously bootstrapped")
+		log.With(slog.Any("error", err)).Error("Failed to check if node was previously bootstrapped")
+		os.Exit(1)
 	}

 	if nodeBootstrapped {
 		if err := kube.StartKubelet(); err != nil {
-			log.With(zap.Error(err)).Fatalf("Failed to restart kubelet")
+			log.With(slog.Any("error", err)).Error("Failed to restart kubelet")
+			os.Exit(1)
 		}
 		return
 	}
@@ -53,7 +56,8 @@ func run(issuer atls.Issuer, openDevice vtpm.TPMOpenFunc, fileHandler file.Handl
 	nodeLock := nodelock.New(openDevice)
 	initServer, err := initserver.New(context.Background(), nodeLock, kube, issuer, fileHandler, metadata, log)
 	if err != nil {
-		log.With(zap.Error(err)).Fatalf("Failed to create init server")
+		log.With(slog.Any("error", err)).Error("Failed to create init server")
+		os.Exit(1)
 	}

 	dialer := dialer.New(issuer, nil, &net.Dialer{})
@@ -66,10 +70,11 @@ func run(issuer atls.Issuer, openDevice vtpm.TPMOpenFunc, fileHandler file.Handl
 	joinClient.Start(cleaner)

 	if err := initServer.Serve(bindIP, bindPort, cleaner); err != nil {
-		log.With(zap.Error(err)).Fatalf("Failed to serve init server")
+		log.With(slog.Any("error", err)).Error("Failed to serve init server")
+		os.Exit(1)
 	}
-	log.Infof("bootstrapper done")
+	log.Info("bootstrapper done")
 }

 func getDiskUUID() (string, error) {
diff --git a/bootstrapper/cmd/bootstrapper/test.go b/bootstrapper/cmd/bootstrapper/test.go
index 24bd7d8614..05840de339 100644
--- a/bootstrapper/cmd/bootstrapper/test.go
+++ b/bootstrapper/cmd/bootstrapper/test.go
@@ -8,9 +8,9 @@ package main
 import (
 	"context"
+	"log/slog"

 	"github.com/edgelesssys/constellation/v2/internal/cloud/metadata"
-	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/internal/role"
 	"github.com/edgelesssys/constellation/v2/internal/versions/components"
 	kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
@@ -22,13 +22,13 @@ type clusterFake struct{}
 // InitCluster fakes bootstrapping a new cluster with the current node being the master, returning the arguments required to join the cluster.
 func (c *clusterFake) InitCluster(
 	context.Context, string, string,
-	bool, components.Components, []string, string, *logger.Logger,
+	bool, components.Components, []string, string, *slog.Logger,
 ) ([]byte, error) {
 	return []byte{}, nil
 }

 // JoinCluster will fake joining the current node to an existing cluster.
-func (c *clusterFake) JoinCluster(context.Context, *kubeadm.BootstrapTokenDiscovery, role.Role, components.Components, *logger.Logger) error {
+func (c *clusterFake) JoinCluster(context.Context, *kubeadm.BootstrapTokenDiscovery, role.Role, components.Components, *slog.Logger) error {
 	return nil
 }
diff --git a/bootstrapper/internal/initserver/BUILD.bazel b/bootstrapper/internal/initserver/BUILD.bazel
index 385108c577..009bb0594b 100644
--- a/bootstrapper/internal/initserver/BUILD.bazel
+++ b/bootstrapper/internal/initserver/BUILD.bazel
@@ -27,7 +27,6 @@ go_library(
         "@org_golang_google_grpc//keepalive",
         "@org_golang_google_grpc//status",
         "@org_golang_x_crypto//bcrypt",
-        "@org_uber_go_zap//:zap",
     ],
 )
diff --git a/bootstrapper/internal/initserver/initserver.go b/bootstrapper/internal/initserver/initserver.go
index 1c9d6c40a0..ff2e5e975f 100644
--- a/bootstrapper/internal/initserver/initserver.go
+++ b/bootstrapper/internal/initserver/initserver.go
@@ -23,6 +23,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"log/slog"
 	"net"
 	"strings"
 	"sync"
@@ -43,7 +44,6 @@ import (
 	"github.com/edgelesssys/constellation/v2/internal/nodestate"
 	"github.com/edgelesssys/constellation/v2/internal/role"
 	"github.com/edgelesssys/constellation/v2/internal/versions/components"
-	"go.uber.org/zap"
 	"golang.org/x/crypto/bcrypt"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
@@ -68,7 +68,7 @@ type Server struct {

 	kmsURI string

-	log *logger.Logger
+	log *slog.Logger

 	journaldCollector journaldCollection

@@ -76,8 +76,8 @@ type Server struct {
 }

 // New creates a new initialization server.
-func New(ctx context.Context, lock locker, kube ClusterInitializer, issuer atls.Issuer, fh file.Handler, metadata MetadataAPI, log *logger.Logger) (*Server, error) {
-	log = log.Named("initServer")
+func New(ctx context.Context, lock locker, kube ClusterInitializer, issuer atls.Issuer, fh file.Handler, metadata MetadataAPI, log *slog.Logger) (*Server, error) {
+	log = log.WithGroup("initServer")

 	initSecretHash, err := metadata.InitSecretHash(ctx)
 	if err != nil {
@@ -106,7 +106,7 @@ func New(ctx context.Context, lock locker, kube ClusterInitializer, issuer atls.
 	grpcServer := grpc.NewServer(
 		grpc.Creds(atlscredentials.New(issuer, nil)),
 		grpc.KeepaliveParams(keepalive.ServerParameters{Time: 15 * time.Second}),
-		log.Named("gRPC").GetServerUnaryInterceptor(),
+		logger.GetServerUnaryInterceptor(log.WithGroup("gRPC")),
 	)
 	initproto.RegisterAPIServer(grpcServer, server)

@@ -122,7 +122,7 @@ func (s *Server) Serve(ip, port string, cleaner cleaner) error {
 		return fmt.Errorf("failed to listen: %w", err)
 	}

-	s.log.Infof("Starting")
+	s.log.Info("Starting")
 	return s.grpcServer.Serve(lis)
 }

@@ -132,8 +132,8 @@ func (s *Server) Init(req *initproto.InitRequest, stream initproto.API_InitServe
 	s.shutdownLock.RLock()
 	defer s.shutdownLock.RUnlock()

-	log := s.log.With(zap.String("peer", grpclog.PeerAddrFromContext(stream.Context())))
-	log.Infof("Init called")
+	log := s.log.With(slog.String("peer", grpclog.PeerAddrFromContext(stream.Context())))
+	log.Info("Init called")

 	s.kmsURI = req.KmsUri

@@ -174,7 +174,7 @@ func (s *Server) Init(req *initproto.InitRequest, stream initproto.API_InitServe
 	// init does not make sense, so we just stop.
 	//
 	// The server stops itself after the current call is done.
-	log.Warnf("Node is already in a join process")
+	log.Warn("Node is already in a join process")

 	err = status.Error(codes.FailedPrecondition, "node is already being activated")

@@ -228,7 +228,7 @@ func (s *Server) Init(req *initproto.InitRequest, stream initproto.API_InitServe
 		return err
 	}

-	log.Infof("Init succeeded")
+	log.Info("Init succeeded")

 	successMessage := &initproto.InitResponse_InitSuccess{
 		InitSuccess: &initproto.InitSuccessResponse{
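logger.GetServerUnaryInterceptor, used in the grpc.NewServer hunk above, replaces a method that previously hung off *logger.Logger. Since slog loggers carry no gRPC helpers, a free function returning a grpc.ServerOption is the natural shape; a sketch under that assumption (the real interceptor in internal/logger may log different fields):

	package logger

	import (
		"context"
		"log/slog"

		"google.golang.org/grpc"
	)

	// GetServerUnaryInterceptor logs every unary RPC on the given slog.Logger.
	// Illustrative only.
	func GetServerUnaryInterceptor(log *slog.Logger) grpc.ServerOption {
		return grpc.UnaryInterceptor(func(
			ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler,
		) (any, error) {
			log.Info("RPC called", slog.String("method", info.FullMethod))
			resp, err := handler(ctx, req)
			if err != nil {
				log.Error("RPC failed", slog.String("method", info.FullMethod), slog.Any("error", err))
			}
			return resp, err
		})
	}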
@@ -287,14 +287,14 @@ func (s *Server) sendLogsWithMessage(stream initproto.API_InitServer, message er

 // Stop stops the initialization server gracefully.
 func (s *Server) Stop() {
-	s.log.Infof("Stopping")
+	s.log.Info("Stopping")

 	// Make sure to only stop the server if no Init calls are running
 	s.shutdownLock.Lock()
 	defer s.shutdownLock.Unlock()
 	s.grpcServer.GracefulStop()

-	s.log.Infof("Stopped")
+	s.log.Info("Stopped")
 }

 func (s *Server) setupDisk(ctx context.Context, cloudKms kms.CloudKMS) error {
@@ -342,7 +342,7 @@ type ClusterInitializer interface {
 		kubernetesComponents components.Components,
 		apiServerCertSANs []string,
 		serviceCIDR string,
-		log *logger.Logger,
+		log *slog.Logger,
 	) ([]byte, error)
 }

diff --git a/bootstrapper/internal/initserver/initserver_test.go b/bootstrapper/internal/initserver/initserver_test.go
index 155fa17938..77a0b0817f 100644
--- a/bootstrapper/internal/initserver/initserver_test.go
+++ b/bootstrapper/internal/initserver/initserver_test.go
@@ -11,6 +11,7 @@ import (
 	"context"
 	"errors"
 	"io"
+	"log/slog"
 	"net"
 	"strings"
 	"sync"
@@ -408,7 +409,7 @@ type stubClusterInitializer struct {
 func (i *stubClusterInitializer) InitCluster(
 	context.Context, string, string,
-	bool, components.Components, []string, string, *logger.Logger,
+	bool, components.Components, []string, string, *slog.Logger,
 ) ([]byte, error) {
 	return i.initClusterKubeconfig, i.initClusterErr
 }
diff --git a/bootstrapper/internal/joinclient/BUILD.bazel b/bootstrapper/internal/joinclient/BUILD.bazel
index c7d6307b57..3b8bf70b7f 100644
--- a/bootstrapper/internal/joinclient/BUILD.bazel
+++ b/bootstrapper/internal/joinclient/BUILD.bazel
@@ -13,7 +13,6 @@ go_library(
         "//internal/cloud/metadata",
         "//internal/constants",
         "//internal/file",
-        "//internal/logger",
         "//internal/nodestate",
         "//internal/role",
         "//internal/versions/components",
@@ -23,7 +22,6 @@ go_library(
         "@io_k8s_kubernetes//cmd/kubeadm/app/constants",
         "@io_k8s_utils//clock",
         "@org_golang_google_grpc//:go_default_library",
-        "@org_uber_go_zap//:zap",
     ],
 )
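The joinclient conversion that follows is typical for the whole patch: zap's typed fields map one-to-one onto slog.Attr constructors, except where slog ships no equivalent helper. The recurring substitutions, summarized as Go comments:

	// Conversion pattern used throughout this patch (illustrative summary):
	//   zap.Error(err)              -> slog.Any("error", err)    // slog has no error helper
	//   zap.String("role", s)       -> slog.String("role", s)
	//   zap.Duration("interval", d) -> slog.Duration("interval", d)
	//   zap.Strings("IPs", ips)     -> slog.Any("IPs", ips)      // no []string helper
	//   log.Infof("x: %s", v)       -> log.Info(fmt.Sprintf("x: %s", v)) // no printf variants
	//   log.Named("sub")            -> log.WithGroup("sub")
	//   log.Fatalf(...)             -> log.Error(...); os.Exit(1) // slog has no Fatal level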
diff --git a/bootstrapper/internal/joinclient/joinclient.go b/bootstrapper/internal/joinclient/joinclient.go
index 7f8857419d..110b52a662 100644
--- a/bootstrapper/internal/joinclient/joinclient.go
+++ b/bootstrapper/internal/joinclient/joinclient.go
@@ -21,6 +21,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"net"
 	"path/filepath"
 	"strconv"
@@ -33,13 +34,11 @@ import (
 	"github.com/edgelesssys/constellation/v2/internal/cloud/metadata"
 	"github.com/edgelesssys/constellation/v2/internal/constants"
 	"github.com/edgelesssys/constellation/v2/internal/file"
-	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/internal/nodestate"
 	"github.com/edgelesssys/constellation/v2/internal/role"
 	"github.com/edgelesssys/constellation/v2/internal/versions/components"
 	"github.com/edgelesssys/constellation/v2/joinservice/joinproto"
 	"github.com/spf13/afero"
-	"go.uber.org/zap"
 	"google.golang.org/grpc"
 	kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
 	kubeconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
@@ -73,7 +72,7 @@ type JoinClient struct {
 	cleaner     cleaner
 	metadataAPI MetadataAPI

-	log *logger.Logger
+	log *slog.Logger

 	mux      sync.Mutex
 	stopC    chan struct{}
@@ -81,7 +80,7 @@ type JoinClient struct {
 }

 // New creates a new JoinClient.
-func New(lock locker, dial grpcDialer, joiner ClusterJoiner, meta MetadataAPI, log *logger.Logger) *JoinClient {
+func New(lock locker, dial grpcDialer, joiner ClusterJoiner, meta MetadataAPI, log *slog.Logger) *JoinClient {
 	return &JoinClient{
 		nodeLock: lock,
 		disk:     diskencryption.New(),
@@ -93,7 +92,7 @@ func New(lock locker, dial grpcDialer, joiner ClusterJoiner, meta MetadataAPI, l
 		dialer:      dial,
 		joiner:      joiner,
 		metadataAPI: meta,
-		log:         log.Named("join-client"),
+		log:         log.WithGroup("join-client"),
 	}
 }

@@ -110,7 +109,7 @@ func (c *JoinClient) Start(cleaner cleaner) {
 		return
 	}

-	c.log.Infof("Starting")
+	c.log.Info("Starting")
 	c.stopC = make(chan struct{}, 1)
 	c.stopDone = make(chan struct{}, 1)
 	c.cleaner = cleaner
@@ -119,11 +118,11 @@ func (c *JoinClient) Start(cleaner cleaner) {
 	go func() {
 		defer ticker.Stop()
 		defer func() { c.stopDone <- struct{}{} }()
-		defer c.log.Infof("Client stopped")
+		defer c.log.Info("Client stopped")

 		diskUUID, err := c.getDiskUUID()
 		if err != nil {
-			c.log.With(zap.Error(err)).Errorf("Failed to get disk UUID")
+			c.log.With(slog.Any("error", err)).Error("Failed to get disk UUID")
 			return
 		}
 		c.diskUUID = diskUUID
@@ -131,12 +130,12 @@ func (c *JoinClient) Start(cleaner cleaner) {
 		for {
 			err := c.getNodeMetadata()
 			if err == nil {
-				c.log.With(zap.String("role", c.role.String()), zap.String("name", c.nodeName)).Infof("Received own instance metadata")
+				c.log.With(slog.String("role", c.role.String()), slog.String("name", c.nodeName)).Info("Received own instance metadata")
 				break
 			}
-			c.log.With(zap.Error(err)).Errorf("Failed to retrieve instance metadata")
+			c.log.With(slog.Any("error", err)).Error("Failed to retrieve instance metadata")

-			c.log.With(zap.Duration("interval", c.interval)).Infof("Sleeping")
+			c.log.With(slog.Duration("interval", c.interval)).Info("Sleeping")
 			select {
 			case <-c.stopC:
 				return
@@ -147,15 +146,15 @@ func (c *JoinClient) Start(cleaner cleaner) {
 		for {
 			err := c.tryJoinWithAvailableServices()
 			if err == nil {
-				c.log.Infof("Joined successfully. Client is shutting down")
+				c.log.Info("Joined successfully. Client is shutting down")
 				return
 			} else if isUnrecoverable(err) {
-				c.log.With(zap.Error(err)).Errorf("Unrecoverable error occurred")
+				c.log.With(slog.Any("error", err)).Error("Unrecoverable error occurred")
 				return
 			}
-			c.log.With(zap.Error(err)).Warnf("Join failed for all available endpoints")
+			c.log.With(slog.Any("error", err)).Warn("Join failed for all available endpoints")

-			c.log.With(zap.Duration("interval", c.interval)).Infof("Sleeping")
+			c.log.With(slog.Duration("interval", c.interval)).Info("Sleeping")
 			select {
 			case <-c.stopC:
 				return
@@ -174,7 +173,7 @@ func (c *JoinClient) Stop() {
 		return
 	}

-	c.log.Infof("Stopping")
+	c.log.Info("Stopping")

 	c.stopC <- struct{}{}
 	<-c.stopDone
@@ -182,7 +181,7 @@ func (c *JoinClient) Stop() {
 	c.stopC = nil
 	c.stopDone = nil

-	c.log.Infof("Stopped")
+	c.log.Info("Stopped")
 }

 func (c *JoinClient) tryJoinWithAvailableServices() error {
@@ -231,7 +230,7 @@ func (c *JoinClient) join(serviceEndpoint string) error {

 	conn, err := c.dialer.Dial(ctx, serviceEndpoint)
 	if err != nil {
-		c.log.With(zap.String("endpoint", serviceEndpoint), zap.Error(err)).Errorf("Join service unreachable")
+		c.log.With(slog.String("endpoint", serviceEndpoint), slog.Any("error", err)).Error("Join service unreachable")
 		return fmt.Errorf("dialing join service endpoint: %w", err)
 	}
 	defer conn.Close()
@@ -244,7 +243,7 @@ func (c *JoinClient) join(serviceEndpoint string) error {
 	}
 	ticket, err := protoClient.IssueJoinTicket(ctx, req)
 	if err != nil {
-		c.log.With(zap.String("endpoint", serviceEndpoint), zap.Error(err)).Errorf("Issuing join ticket failed")
+		c.log.With(slog.String("endpoint", serviceEndpoint), slog.Any("error", err)).Error("Issuing join ticket failed")
 		return fmt.Errorf("issuing join ticket: %w", err)
 	}

@@ -269,7 +268,7 @@ func (c *JoinClient) startNodeAndJoin(ticket *joinproto.IssueJoinTicketResponse,

 	nodeLockAcquired, err := c.nodeLock.TryLockOnce(clusterID)
 	if err != nil {
-		c.log.With(zap.Error(err)).Errorf("Acquiring node lock failed")
+		c.log.With(slog.Any("error", err)).Error("Acquiring node lock failed")
 		return fmt.Errorf("acquiring node lock: %w", err)
 	}
 	if !nodeLockAcquired {
@@ -322,12 +321,12 @@ func (c *JoinClient) getNodeMetadata() error {
 	ctx, cancel := c.timeoutCtx()
 	defer cancel()

-	c.log.Debugf("Requesting node metadata from metadata API")
+	c.log.Debug("Requesting node metadata from metadata API")
 	inst, err := c.metadataAPI.Self(ctx)
 	if err != nil {
 		return err
 	}
-	c.log.With(zap.Any("instance", inst)).Debugf("Received node metadata")
+	c.log.With(slog.Any("instance", inst)).Debug("Received node metadata")

 	if inst.Name == "" {
 		return errors.New("got instance metadata with empty name")
@@ -371,7 +370,7 @@ func (c *JoinClient) getDiskUUID() (string, error) {
 func (c *JoinClient) getControlPlaneIPs(ctx context.Context) ([]string, error) {
 	instances, err := c.metadataAPI.List(ctx)
 	if err != nil {
-		c.log.With(zap.Error(err)).Errorf("Failed to list instances from metadata API")
+		c.log.With(slog.Any("error", err)).Error("Failed to list instances from metadata API")
 		return nil, fmt.Errorf("listing instances from metadata API: %w", err)
 	}

@@ -382,7 +381,7 @@ func (c *JoinClient) getControlPlaneIPs(ctx context.Context) ([]string, error) {
 		}
 	}

-	c.log.With(zap.Strings("IPs", ips)).Infof("Received control plane endpoints")
+	c.log.With(slog.Any("IPs", ips)).Info("Received control plane endpoints")
 	return ips, nil
 }

@@ -423,7 +422,7 @@ type ClusterJoiner interface {
 		args *kubeadm.BootstrapTokenDiscovery,
 		peerRole role.Role,
 		k8sComponents components.Components,
-		log *logger.Logger,
+		log *slog.Logger,
 	) error
 }

diff --git a/bootstrapper/internal/joinclient/joinclient_test.go b/bootstrapper/internal/joinclient/joinclient_test.go
index 82426b0a5a..4684b2eb49 100644
--- a/bootstrapper/internal/joinclient/joinclient_test.go
+++ b/bootstrapper/internal/joinclient/joinclient_test.go
@@ -9,6 +9,7 @@ package joinclient
 import (
 	"context"
 	"errors"
+	"log/slog"
 	"net"
 	"strconv"
 	"sync"
@@ -401,7 +402,7 @@ type stubClusterJoiner struct {
 	joinClusterErr    error
 }

-func (j *stubClusterJoiner) JoinCluster(context.Context, *kubeadm.BootstrapTokenDiscovery, role.Role, components.Components, *logger.Logger) error {
+func (j *stubClusterJoiner) JoinCluster(context.Context, *kubeadm.BootstrapTokenDiscovery, role.Role, components.Components, *slog.Logger) error {
 	j.joinClusterCalled = true
 	return j.joinClusterErr
 }
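With *logger.Logger gone from these interfaces, test stubs like the ones above need a plain *slog.Logger. One way to get a silent logger for tests, assuming no dedicated test helper in internal/logger (the repo may well provide one):

	import (
		"io"
		"log/slog"
	)

	// discardLogger returns a *slog.Logger that swallows all output, handy for
	// exercising code that requires a non-nil logger. Hypothetical helper.
	func discardLogger() *slog.Logger {
		return slog.New(slog.NewTextHandler(io.Discard, nil))
	}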
diff --git a/bootstrapper/internal/kubernetes/BUILD.bazel b/bootstrapper/internal/kubernetes/BUILD.bazel
index c2255148a6..d6ba14a496 100644
--- a/bootstrapper/internal/kubernetes/BUILD.bazel
+++ b/bootstrapper/internal/kubernetes/BUILD.bazel
@@ -17,13 +17,11 @@ go_library(
         "//internal/cloud/metadata",
         "//internal/constants",
         "//internal/kubernetes",
-        "//internal/logger",
         "//internal/role",
         "//internal/versions/components",
         "@io_k8s_api//core/v1:core",
         "@io_k8s_apimachinery//pkg/apis/meta/v1:meta",
         "@io_k8s_kubernetes//cmd/kubeadm/app/apis/kubeadm/v1beta3",
-        "@org_uber_go_zap//:zap",
     ],
 )
diff --git a/bootstrapper/internal/kubernetes/k8sapi/BUILD.bazel b/bootstrapper/internal/kubernetes/k8sapi/BUILD.bazel
index 85738d5007..ef87085bd8 100644
--- a/bootstrapper/internal/kubernetes/k8sapi/BUILD.bazel
+++ b/bootstrapper/internal/kubernetes/k8sapi/BUILD.bazel
@@ -19,7 +19,6 @@ go_library(
         "//internal/file",
         "//internal/installer",
         "//internal/kubernetes",
-        "//internal/logger",
         "//internal/versions/components",
         "@com_github_coreos_go_systemd_v22//dbus",
         "@com_github_spf13_afero//:afero",
@@ -29,7 +28,6 @@ go_library(
         "@io_k8s_kubelet//config/v1beta1",
         "@io_k8s_kubernetes//cmd/kubeadm/app/apis/kubeadm/v1beta3",
         "@io_k8s_kubernetes//cmd/kubeadm/app/constants",
-        "@org_uber_go_zap//:zap",
     ],
 )
diff --git a/bootstrapper/internal/kubernetes/k8sapi/k8sutil.go b/bootstrapper/internal/kubernetes/k8sapi/k8sutil.go
index 5f00f3e1e3..53f681b493 100644
--- a/bootstrapper/internal/kubernetes/k8sapi/k8sutil.go
+++ b/bootstrapper/internal/kubernetes/k8sapi/k8sutil.go
@@ -13,6 +13,7 @@ import (
 	"encoding/pem"
 	"errors"
 	"fmt"
+	"log/slog"
 	"net"
 	"os"
 	"os/exec"
@@ -30,9 +31,7 @@ import (
 	"github.com/edgelesssys/constellation/v2/internal/crypto"
 	"github.com/edgelesssys/constellation/v2/internal/file"
 	"github.com/edgelesssys/constellation/v2/internal/installer"
-	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/spf13/afero"
-	"go.uber.org/zap"
 )

 const (
@@ -87,7 +86,7 @@ func (k *KubernetesUtil) InstallComponents(ctx context.Context, kubernetesCompon
 // InitCluster instruments kubeadm to initialize the K8s cluster.
 // On success an admin kubeconfig file is returned.
 func (k *KubernetesUtil) InitCluster(
-	ctx context.Context, initConfig []byte, nodeName, clusterName string, ips []net.IP, conformanceMode bool, log *logger.Logger,
+	ctx context.Context, initConfig []byte, nodeName, clusterName string, ips []net.IP, conformanceMode bool, log *slog.Logger,
 ) ([]byte, error) {
 	// TODO(3u13r): audit policy should be user input
 	auditPolicy, err := resources.NewDefaultAuditPolicy().Marshal()
 	if err != nil {
@@ -108,7 +107,7 @@ func (k *KubernetesUtil) InitCluster(
 	}

 	// preflight
-	log.Infof("Running kubeadm preflight checks")
+	log.Info("Running kubeadm preflight checks")
 	cmd := exec.CommandContext(ctx, constants.KubeadmPath, "init", "phase", "preflight", "-v=5", "--config", initConfigFile.Name())
 	out, err := cmd.CombinedOutput()
 	if err != nil {
@@ -120,7 +119,7 @@ func (k *KubernetesUtil) InitCluster(
 	}

 	// create CA certs
-	log.Infof("Creating Kubernetes control-plane certificates and keys")
+	log.Info("Creating Kubernetes control-plane certificates and keys")
 	cmd = exec.CommandContext(ctx, constants.KubeadmPath, "init", "phase", "certs", "all", "-v=5", "--config", initConfigFile.Name())
 	out, err = cmd.CombinedOutput()
 	if err != nil {
@@ -132,19 +131,19 @@ func (k *KubernetesUtil) InitCluster(
 	}

 	// create kubelet key and CA signed certificate for the node
-	log.Infof("Creating signed kubelet certificate")
+	log.Info("Creating signed kubelet certificate")
 	if err := k.createSignedKubeletCert(nodeName, ips); err != nil {
 		return nil, fmt.Errorf("creating signed kubelete certificate: %w", err)
 	}

 	// Create static pods directory for all nodes (the Kubelets on the worker nodes also expect the path to exist)
-	log.Infof("Creating static Pod directory /etc/kubernetes/manifests")
+	log.Info("Creating static Pod directory /etc/kubernetes/manifests")
 	if err := os.MkdirAll("/etc/kubernetes/manifests", os.ModePerm); err != nil {
 		return nil, fmt.Errorf("creating static pods directory: %w", err)
 	}

 	// initialize the cluster
-	log.Infof("Initializing the cluster using kubeadm init")
+	log.Info("Initializing the cluster using kubeadm init")
 	skipPhases := "--skip-phases=preflight,certs"
 	if !conformanceMode {
 		skipPhases += ",addon/kube-proxy"
@@ -159,11 +158,11 @@ func (k *KubernetesUtil) InitCluster(
 		}
 		return nil, fmt.Errorf("kubeadm init: %w", err)
 	}
-	log.With(zap.String("output", string(out))).Infof("kubeadm init succeeded")
+	log.With(slog.String("output", string(out))).Info("kubeadm init succeeded")

 	userName := clusterName + "-admin"

-	log.With(zap.String("userName", userName)).Infof("Creating admin kubeconfig file")
+	log.With(slog.String("userName", userName)).Info("Creating admin kubeconfig file")
 	cmd = exec.CommandContext(
 		ctx, constants.KubeadmPath, "kubeconfig", "user",
 		"--client-name", userName, "--config", initConfigFile.Name(), "--org", user.SystemPrivilegedGroup,
@@ -176,12 +175,12 @@ func (k *KubernetesUtil) InitCluster(
 		}
 		return nil, fmt.Errorf("kubeadm kubeconfig user: %w", err)
 	}
-	log.Infof("kubeadm kubeconfig user succeeded")
+	log.Info("kubeadm kubeconfig user succeeded")

 	return out, nil
 }

 // JoinCluster joins existing Kubernetes cluster using kubeadm join.
-func (k *KubernetesUtil) JoinCluster(ctx context.Context, joinConfig []byte, log *logger.Logger) error {
+func (k *KubernetesUtil) JoinCluster(ctx context.Context, joinConfig []byte, log *slog.Logger) error {
 	// TODO(3u13r): audit policy should be user input
 	auditPolicy, err := resources.NewDefaultAuditPolicy().Marshal()
 	if err != nil {
@@ -201,7 +200,7 @@ func (k *KubernetesUtil) JoinCluster(ctx context.Context, joinConfig []byte, log
 	}

 	// Create static pods directory for all nodes (the Kubelets on the worker nodes also expect the path to exist)
-	log.Infof("Creating static Pod directory /etc/kubernetes/manifests")
+	log.Info("Creating static Pod directory /etc/kubernetes/manifests")
 	if err := os.MkdirAll("/etc/kubernetes/manifests", os.ModePerm); err != nil {
 		return fmt.Errorf("creating static pods directory: %w", err)
 	}
@@ -216,7 +215,7 @@ func (k *KubernetesUtil) JoinCluster(ctx context.Context, joinConfig []byte, log
 	}
 		return fmt.Errorf("kubeadm join: %w", err)
 	}
-	log.With(zap.String("output", string(out))).Infof("kubeadm join succeeded")
+	log.With(slog.String("output", string(out))).Info("kubeadm join succeeded")

 	return nil
 }
diff --git a/bootstrapper/internal/kubernetes/k8sutil.go b/bootstrapper/internal/kubernetes/k8sutil.go
index 3c7b55718c..1faf6c3cf6 100644
--- a/bootstrapper/internal/kubernetes/k8sutil.go
+++ b/bootstrapper/internal/kubernetes/k8sutil.go
@@ -8,15 +8,15 @@ package kubernetes

 import (
 	"context"
+	"log/slog"
 	"net"

-	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/internal/versions/components"
 )

 type clusterUtil interface {
 	InstallComponents(ctx context.Context, kubernetesComponents components.Components) error
-	InitCluster(ctx context.Context, initConfig []byte, nodeName, clusterName string, ips []net.IP, conformanceMode bool, log *logger.Logger) ([]byte, error)
-	JoinCluster(ctx context.Context, joinConfig []byte, log *logger.Logger) error
+	InitCluster(ctx context.Context, initConfig []byte, nodeName, clusterName string, ips []net.IP, conformanceMode bool, log *slog.Logger) ([]byte, error)
+	JoinCluster(ctx context.Context, joinConfig []byte, log *slog.Logger) error
 	StartKubelet() error
 }
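The kubernetes.go hunks below port a long log.With(...) attribute list field by field. slog can also bundle such related attributes under one key with slog.Group, which this patch does not use, but which would render the node data as a nested object in the JSON output; for comparison (variable names taken from the surrounding hunk):

	// Alternative, not what the patch does: group the node attributes.
	log.With(
		slog.Group("node",
			slog.String("name", nodeName),
			slog.String("providerID", instance.ProviderID),
			slog.String("ip", nodeIP),
		),
	).Info("Setting information for node")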
diff --git a/bootstrapper/internal/kubernetes/kubernetes.go b/bootstrapper/internal/kubernetes/kubernetes.go
index ed587d933c..5ef1f46376 100644
--- a/bootstrapper/internal/kubernetes/kubernetes.go
+++ b/bootstrapper/internal/kubernetes/kubernetes.go
@@ -10,6 +10,7 @@ package kubernetes
 import (
 	"context"
 	"fmt"
+	"log/slog"
 	"net"
 	"regexp"
 	"strings"
@@ -20,10 +21,8 @@ import (
 	"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
 	"github.com/edgelesssys/constellation/v2/internal/constants"
 	"github.com/edgelesssys/constellation/v2/internal/kubernetes"
-	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/internal/role"
 	"github.com/edgelesssys/constellation/v2/internal/versions/components"
-	"go.uber.org/zap"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
@@ -69,9 +68,9 @@ func New(cloudProvider string, clusterUtil clusterUtil, configProvider configura

 // InitCluster initializes a new Kubernetes cluster and applies pod network provider.
 func (k *KubeWrapper) InitCluster(
-	ctx context.Context, versionString, clusterName string, conformanceMode bool, kubernetesComponents components.Components, apiServerCertSANs []string, serviceCIDR string, log *logger.Logger,
+	ctx context.Context, versionString, clusterName string, conformanceMode bool, kubernetesComponents components.Components, apiServerCertSANs []string, serviceCIDR string, log *slog.Logger,
 ) ([]byte, error) {
-	log.With(zap.String("version", versionString)).Infof("Installing Kubernetes components")
+	log.With(slog.String("version", versionString)).Info("Installing Kubernetes components")
 	if err := k.clusterUtil.InstallComponents(ctx, kubernetesComponents); err != nil {
 		return nil, err
 	}
@@ -79,7 +78,7 @@ func (k *KubeWrapper) InitCluster(
 	var validIPs []net.IP

 	// Step 1: retrieve cloud metadata for Kubernetes configuration
-	log.Infof("Retrieving node metadata")
+	log.Info("Retrieving node metadata")
 	instance, err := k.providerMetadata.Self(ctx)
 	if err != nil {
 		return nil, fmt.Errorf("retrieving own instance metadata: %w", err)
 	}
@@ -108,14 +107,14 @@ func (k *KubeWrapper) InitCluster(
 	certSANs = append(certSANs, apiServerCertSANs...)

 	log.With(
-		zap.String("nodeName", nodeName),
-		zap.String("providerID", instance.ProviderID),
-		zap.String("nodeIP", nodeIP),
-		zap.String("controlPlaneHost", controlPlaneHost),
-		zap.String("controlPlanePort", controlPlanePort),
-		zap.String("certSANs", strings.Join(certSANs, ",")),
-		zap.String("podCIDR", subnetworkPodCIDR),
-	).Infof("Setting information for node")
+		slog.String("nodeName", nodeName),
+		slog.String("providerID", instance.ProviderID),
+		slog.String("nodeIP", nodeIP),
+		slog.String("controlPlaneHost", controlPlaneHost),
+		slog.String("controlPlanePort", controlPlanePort),
+		slog.String("certSANs", strings.Join(certSANs, ",")),
+		slog.String("podCIDR", subnetworkPodCIDR),
+	).Info("Setting information for node")

 	// Step 2: configure kubeadm init config
 	ccmSupported := cloudprovider.FromString(k.cloudProvider) == cloudprovider.Azure ||
@@ -133,7 +132,7 @@ func (k *KubeWrapper) InitCluster(
 	if err != nil {
 		return nil, fmt.Errorf("encoding kubeadm init configuration as YAML: %w", err)
 	}
-	log.Infof("Initializing Kubernetes cluster")
+	log.Info("Initializing Kubernetes cluster")
 	kubeConfig, err := k.clusterUtil.InitCluster(ctx, initConfigYAML, nodeName, clusterName, validIPs, conformanceMode, log)
 	if err != nil {
 		return nil, fmt.Errorf("kubeadm init: %w", err)
 	}
@@ -178,7 +177,7 @@ func (k *KubeWrapper) InitCluster(
 		return nil, fmt.Errorf("annotating node with Kubernetes components hash: %w", err)
 	}

-	log.Infof("Setting up internal-config ConfigMap")
+	log.Info("Setting up internal-config ConfigMap")
 	if err := k.setupInternalConfigMap(ctx); err != nil {
 		return nil, fmt.Errorf("failed to setup internal ConfigMap: %w", err)
 	}
@@ -186,14 +185,14 @@ func (k *KubeWrapper) InitCluster(
 }

 // JoinCluster joins existing Kubernetes cluster.
-func (k *KubeWrapper) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTokenDiscovery, peerRole role.Role, k8sComponents components.Components, log *logger.Logger) error {
-	log.With("k8sComponents", k8sComponents).Infof("Installing provided kubernetes components")
+func (k *KubeWrapper) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTokenDiscovery, peerRole role.Role, k8sComponents components.Components, log *slog.Logger) error {
+	log.With("k8sComponents", k8sComponents).Info("Installing provided kubernetes components")
 	if err := k.clusterUtil.InstallComponents(ctx, k8sComponents); err != nil {
 		return fmt.Errorf("installing kubernetes components: %w", err)
 	}

 	// Step 1: retrieve cloud metadata for Kubernetes configuration
-	log.Infof("Retrieving node metadata")
+	log.Info("Retrieving node metadata")
 	instance, err := k.providerMetadata.Self(ctx)
 	if err != nil {
 		return fmt.Errorf("retrieving own instance metadata: %w", err)
 	}
@@ -214,12 +213,12 @@ func (k *KubeWrapper) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTo
 	args.APIServerEndpoint = net.JoinHostPort(loadBalancerHost, loadBalancerPort)

 	log.With(
-		zap.String("nodeName", nodeName),
-		zap.String("providerID", providerID),
-		zap.String("nodeIP", nodeInternalIP),
-		zap.String("loadBalancerHost", loadBalancerHost),
-		zap.String("loadBalancerPort", loadBalancerPort),
-	).Infof("Setting information for node")
+		slog.String("nodeName", nodeName),
+		slog.String("providerID", providerID),
+		slog.String("nodeIP", nodeInternalIP),
+		slog.String("loadBalancerHost", loadBalancerHost),
+		slog.String("loadBalancerPort", loadBalancerPort),
+	).Info("Setting information for node")

 	// Step 2: configure kubeadm join config
 	ccmSupported := cloudprovider.FromString(k.cloudProvider) == cloudprovider.Azure ||
@@ -238,7 +237,7 @@ func (k *KubeWrapper) JoinCluster(ctx context.Context, args *kubeadm.BootstrapTo
 	if err != nil {
 		return fmt.Errorf("encoding kubeadm join configuration as YAML: %w", err)
 	}
-	log.With(zap.String("apiServerEndpoint", args.APIServerEndpoint)).Infof("Joining Kubernetes cluster")
+	log.With(slog.String("apiServerEndpoint", args.APIServerEndpoint)).Info("Joining Kubernetes cluster")
 	if err := k.clusterUtil.JoinCluster(ctx, joinConfigYAML, log); err != nil {
 		return fmt.Errorf("joining cluster: %v; %w ", string(joinConfigYAML), err)
 	}
diff --git a/bootstrapper/internal/kubernetes/kubernetes_test.go b/bootstrapper/internal/kubernetes/kubernetes_test.go
index 4f179f549d..ccc3a107cb 100644
--- a/bootstrapper/internal/kubernetes/kubernetes_test.go
+++ b/bootstrapper/internal/kubernetes/kubernetes_test.go
@@ -9,6 +9,7 @@ package kubernetes
 import (
 	"context"
 	"errors"
+	"log/slog"
 	"net"
 	"strconv"
 	"testing"
@@ -440,7 +441,7 @@ func (s *stubClusterUtil) InstallComponents(_ context.Context, _ components.Comp
 	return s.installComponentsErr
 }

-func (s *stubClusterUtil) InitCluster(_ context.Context, initConfig []byte, _, _ string, _ []net.IP, _ bool, _ *logger.Logger) ([]byte, error) {
+func (s *stubClusterUtil) InitCluster(_ context.Context, initConfig []byte, _, _ string, _ []net.IP, _ bool, _ *slog.Logger) ([]byte, error) {
 	s.initConfigs = append(s.initConfigs, initConfig)
 	return s.kubeconfig, s.initClusterErr
 }
@@ -465,7 +466,7 @@ func (s *stubClusterUtil) SetupNodeOperator(_ context.Context, _ k8sapi.Client,
 	return s.setupNodeOperatorErr
 }

-func (s *stubClusterUtil) JoinCluster(_ context.Context, joinConfig []byte, _ *logger.Logger) error {
+func (s *stubClusterUtil) JoinCluster(_ context.Context, joinConfig []byte, _ *slog.Logger) error {
 	s.joinConfigs = append(s.joinConfigs, joinConfig)
 	return s.joinClusterErr
 }
diff --git a/cli/internal/cmd/BUILD.bazel b/cli/internal/cmd/BUILD.bazel
index 5654b1dee1..afccfc643b 100644
--- a/cli/internal/cmd/BUILD.bazel
+++ b/cli/internal/cmd/BUILD.bazel
@@ -110,7 +110,6 @@ go_library(
         "@io_k8s_sigs_yaml//:yaml",
         "@org_golang_google_grpc//:go_default_library",
         "@org_golang_x_mod//semver",
-        "@org_uber_go_zap//zapcore",
     ] + select({
         "@io_bazel_rules_go//go/platform:android_amd64": [
             "@org_golang_x_sys//unix",
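A recurring cost of the migration shows up in the apply.go hunks below: zap's SugaredLogger offered printf-style Debugf, while slog's canonical call is Debug(msg, key, value, ...). Where the message itself was formatted, the patch wraps it in fmt.Sprintf; passing the value as an attribute would be the more structured alternative. Both options, sketched:

	// Formatted message, as the patch does it:
	log.Debug(fmt.Sprintf("Reading config from %s", path))

	// Alternative with a structured attribute (not what the patch chose):
	log.Debug("Reading config", slog.String("path", path))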
// However, we perform basic steps, like ensuring init phase is not skipped if - a.log.Debugf("Validating state file") + a.log.Debug("Validating state file") preCreateValidateErr := stateFile.Validate(state.PreCreate, conf.GetAttestationConfig().GetVariant()) preInitValidateErr := stateFile.Validate(state.PreInit, conf.GetAttestationConfig().GetVariant()) postInitValidateErr := stateFile.Validate(state.PostInit, conf.GetAttestationConfig().GetVariant()) @@ -473,7 +472,7 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc // in which case the workspace has to be clean if preCreateValidateErr == nil { // We can't skip the infrastructure phase if no infrastructure has been defined - a.log.Debugf("State file is in pre-create state, checking workspace") + a.log.Debug("State file is in pre-create state, checking workspace") if a.flags.skipPhases.contains(skipInfrastructurePhase) { return nil, nil, preInitValidateErr } @@ -482,7 +481,7 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc return nil, nil, err } - a.log.Debugf("No Terraform state found in current working directory. Preparing to create a new cluster.") + a.log.Debug("No Terraform state found in current working directory. Preparing to create a new cluster.") printCreateWarnings(cmd.ErrOrStderr(), conf) } @@ -491,7 +490,7 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc // If so, we need to run the init RPC if preInitValidateErr == nil || (preCreateValidateErr == nil && !a.flags.skipPhases.contains(skipInitPhase)) { // We can't skip the init phase if the init RPC hasn't been run yet - a.log.Debugf("State file is in pre-init state, checking workspace") + a.log.Debug("State file is in pre-init state, checking workspace") if a.flags.skipPhases.contains(skipInitPhase) { return nil, nil, postInitValidateErr } @@ -507,7 +506,7 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc // If the state file is in a post-init state, // we need to make sure specific files exist in the workspace if postInitValidateErr == nil { - a.log.Debugf("State file is in post-init state, checking workspace") + a.log.Debug("State file is in post-init state, checking workspace") if err := a.checkPostInitFilesExist(); err != nil { return nil, nil, err } @@ -522,16 +521,16 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc // If we need to run the init RPC, the version has to be valid // Otherwise, we are able to use an outdated version, meaning we skip the K8s upgrade // We skip version validation if the user explicitly skips the Kubernetes phase - a.log.Debugf("Validating Kubernetes version %s", conf.KubernetesVersion) + a.log.Debug(fmt.Sprintf("Validating Kubernetes version %s", conf.KubernetesVersion)) validVersion, err := versions.NewValidK8sVersion(string(conf.KubernetesVersion), true) if err != nil { - a.log.Debugf("Kubernetes version not valid: %s", err) + a.log.Debug(fmt.Sprintf("Kubernetes version not valid: %s", err)) if !a.flags.skipPhases.contains(skipInitPhase) { return nil, nil, err } if !a.flags.skipPhases.contains(skipK8sPhase) { - a.log.Debugf("Checking if user wants to continue anyway") + a.log.Debug("Checking if user wants to continue anyway") if !a.flags.yes { confirmed, err := askToConfirm(cmd, fmt.Sprintf( @@ -548,7 +547,7 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc } a.flags.skipPhases.add(skipK8sPhase) - a.log.Debugf("Outdated Kubernetes 
version accepted, Kubernetes upgrade will be skipped") + a.log.Debug("Outdated Kubernetes version accepted, Kubernetes upgrade will be skipped") } validVersionString, err := versions.ResolveK8sPatchVersion(xsemver.MajorMinor(string(conf.KubernetesVersion))) @@ -564,7 +563,7 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc cmd.PrintErrf("Warning: Constellation with Kubernetes %s is still in preview. Use only for evaluation purposes.\n", validVersion) } conf.KubernetesVersion = validVersion - a.log.Debugf("Target Kubernetes version set to %s", conf.KubernetesVersion) + a.log.Debug(fmt.Sprintf("Target Kubernetes version set to %s", conf.KubernetesVersion)) // Validate microservice version (helm versions) in the user's config matches the version of the CLI // This makes sure we catch potential errors early, not just after we already ran Terraform migrations or the init RPC @@ -592,9 +591,9 @@ func (a *applyCmd) applyJoinConfig(cmd *cobra.Command, newConfig config.Attestat ) error { clusterAttestationConfig, err := a.applier.GetClusterAttestationConfig(cmd.Context(), newConfig.GetVariant()) if err != nil { - a.log.Debugf("Getting cluster attestation config failed: %s", err) + a.log.Debug(fmt.Sprintf("Getting cluster attestation config failed: %s", err)) if k8serrors.IsNotFound(err) { - a.log.Debugf("Creating new join config") + a.log.Debug("Creating new join config") return a.applier.ApplyJoinConfig(cmd.Context(), newConfig, measurementSalt) } return fmt.Errorf("getting cluster attestation config: %w", err) @@ -606,7 +605,7 @@ func (a *applyCmd) applyJoinConfig(cmd *cobra.Command, newConfig config.Attestat return fmt.Errorf("comparing attestation configs: %w", err) } if equal { - a.log.Debugf("Current attestation config is equal to the new config, nothing to do") + a.log.Debug("Current attestation config is equal to the new config, nothing to do") return nil } @@ -685,7 +684,7 @@ func (a *applyCmd) checkCreateFilesClean() error { if err := a.checkInitFilesClean(); err != nil { return err } - a.log.Debugf("Checking Terraform state") + a.log.Debug("Checking Terraform state") if _, err := a.fileHandler.Stat(constants.TerraformWorkingDir); err == nil { return fmt.Errorf( "terraform state %q already exists in working directory, run 'constellation terminate' before creating a new cluster", @@ -700,7 +699,7 @@ func (a *applyCmd) checkCreateFilesClean() error { // checkInitFilesClean ensures that the workspace is clean before running the init RPC. func (a *applyCmd) checkInitFilesClean() error { - a.log.Debugf("Checking admin configuration file") + a.log.Debug("Checking admin configuration file") if _, err := a.fileHandler.Stat(constants.AdminConfFilename); err == nil { return fmt.Errorf( "file %q already exists in working directory, run 'constellation terminate' before creating a new cluster", @@ -709,7 +708,7 @@ func (a *applyCmd) checkInitFilesClean() error { } else if !errors.Is(err, fs.ErrNotExist) { return fmt.Errorf("checking for %q: %w", a.flags.pathPrefixer.PrefixPrintablePath(constants.AdminConfFilename), err) } - a.log.Debugf("Checking master secrets file") + a.log.Debug("Checking master secrets file") if _, err := a.fileHandler.Stat(constants.MasterSecretFilename); err == nil { return fmt.Errorf( "file %q already exists in working directory. Constellation won't overwrite previous master secrets. 
Move it somewhere or delete it before creating a new cluster", @@ -805,20 +804,20 @@ type warnLogger struct { log debugLog } -// Infof messages are reduced to debug messages, since we don't want +// Info messages are reduced to debug messages, since we don't want // the extra info when using the CLI without setting the debug flag. -func (wl warnLogger) Infof(fmtStr string, args ...any) { - wl.log.Debugf(fmtStr, args...) +func (wl warnLogger) Info(msg string, args ...any) { + wl.log.Debug(msg, args...) } -// Warnf prints a formatted warning from the validator. -func (wl warnLogger) Warnf(fmtStr string, args ...any) { - wl.cmd.PrintErrf("Warning: %s\n", fmt.Sprintf(fmtStr, args...)) +// Warn prints a formatted warning from the validator. +func (wl warnLogger) Warn(msg string, args ...any) { + wl.cmd.PrintErrf("Warning: %s %s\n", msg, fmt.Sprint(args...)) } type warnLog interface { - Warnf(format string, args ...any) - Infof(format string, args ...any) + Warn(msg string, args ...any) + Info(msg string, args ...any) } // applier is used to run the different phases of the apply command. diff --git a/cli/internal/cmd/applyhelm.go b/cli/internal/cmd/applyhelm.go index 74e65ff5a0..79ae2a6d72 100644 --- a/cli/internal/cmd/applyhelm.go +++ b/cli/internal/cmd/applyhelm.go @@ -25,7 +25,7 @@ import ( // runHelmApply handles installing or upgrading helm charts for the cluster. func (a *applyCmd) runHelmApply(cmd *cobra.Command, conf *config.Config, stateFile *state.State, upgradeDir string, ) error { - a.log.Debugf("Installing or upgrading Helm charts") + a.log.Debug("Installing or upgrading Helm charts") var masterSecret uri.MasterSecret if err := a.fileHandler.ReadJSON(constants.MasterSecretFilename, &masterSecret); err != nil { return fmt.Errorf("reading master secret: %w", err) @@ -44,13 +44,13 @@ func (a *applyCmd) runHelmApply(cmd *cobra.Command, conf *config.Config, stateFi AllowDestructive: helm.DenyDestructive, } - a.log.Debugf("Getting service account URI") + a.log.Debug("Getting service account URI") serviceAccURI, err := cloudcmd.GetMarshaledServiceAccountURI(conf, a.fileHandler) if err != nil { return err } - a.log.Debugf("Preparing Helm charts") + a.log.Debug("Preparing Helm charts") executor, includesUpgrades, err := a.applier.PrepareHelmCharts(options, stateFile, serviceAccURI, masterSecret, conf.Provider.OpenStack) if errors.Is(err, helm.ErrConfirmationMissing) { if !a.flags.yes { @@ -75,12 +75,12 @@ func (a *applyCmd) runHelmApply(cmd *cobra.Command, conf *config.Config, stateFi cmd.PrintErrln(err) } - a.log.Debugf("Backing up Helm charts") + a.log.Debug("Backing up Helm charts") if err := a.backupHelmCharts(cmd.Context(), executor, includesUpgrades, upgradeDir); err != nil { return err } - a.log.Debugf("Applying Helm charts") + a.log.Debug("Applying Helm charts") if !a.flags.skipPhases.contains(skipInitPhase) { a.spinner.Start("Installing Kubernetes components ", false) } else { @@ -108,10 +108,10 @@ func (a *applyCmd) backupHelmCharts( if err := executor.SaveCharts(chartDir, a.fileHandler); err != nil { return fmt.Errorf("saving Helm charts to disk: %w", err) } - a.log.Debugf("Helm charts saved to %s", a.flags.pathPrefixer.PrefixPrintablePath(chartDir)) + a.log.Debug(fmt.Sprintf("Helm charts saved to %s", a.flags.pathPrefixer.PrefixPrintablePath(chartDir))) if includesUpgrades { - a.log.Debugf("Creating backup of CRDs and CRs") + a.log.Debug("Creating backup of CRDs and CRs") crds, err := a.applier.BackupCRDs(ctx, a.fileHandler, upgradeDir) if err != nil { return fmt.Errorf("creating 
CRD backup: %w", err) diff --git a/cli/internal/cmd/applyinit.go b/cli/internal/cmd/applyinit.go index 90fa77cdc9..34ab7f1a9b 100644 --- a/cli/internal/cmd/applyinit.go +++ b/cli/internal/cmd/applyinit.go @@ -29,13 +29,13 @@ import ( // On success, it writes the Kubernetes admin config file to disk. // Therefore it is skipped if the Kubernetes admin config file already exists. func (a *applyCmd) runInit(cmd *cobra.Command, conf *config.Config, stateFile *state.State) (*bytes.Buffer, error) { - a.log.Debugf("Creating aTLS Validator for %s", conf.GetAttestationConfig().GetVariant()) + a.log.Debug(fmt.Sprintf("Creating aTLS Validator for %s", conf.GetAttestationConfig().GetVariant())) validator, err := choose.Validator(conf.GetAttestationConfig(), a.wLog) if err != nil { return nil, fmt.Errorf("creating validator: %w", err) } - a.log.Debugf("Running init RPC") + a.log.Debug("Running init RPC") masterSecret, err := a.generateAndPersistMasterSecret(cmd.OutOrStdout()) if err != nil { return nil, fmt.Errorf("generating master secret: %w", err) @@ -74,9 +74,9 @@ func (a *applyCmd) runInit(cmd *cobra.Command, conf *config.Config, stateFile *s } return nil, err } - a.log.Debugf("Initialization request successful") + a.log.Debug("Initialization request successful") - a.log.Debugf("Buffering init success message") + a.log.Debug("Buffering init success message") bufferedOutput := &bytes.Buffer{} if err := a.writeInitOutput(stateFile, resp, a.flags.mergeConfigs, bufferedOutput, measurementSalt); err != nil { return nil, err @@ -121,7 +121,7 @@ func (a *applyCmd) writeInitOutput( if err := a.fileHandler.Write(constants.AdminConfFilename, initResp.Kubeconfig, file.OptNone); err != nil { return fmt.Errorf("writing kubeconfig: %w", err) } - a.log.Debugf("Kubeconfig written to %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.AdminConfFilename)) + a.log.Debug(fmt.Sprintf("Kubeconfig written to %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.AdminConfFilename))) if mergeConfig { if err := a.merger.mergeConfigs(constants.AdminConfFilename, a.fileHandler); err != nil { @@ -136,7 +136,7 @@ func (a *applyCmd) writeInitOutput( return fmt.Errorf("writing Constellation state file: %w", err) } - a.log.Debugf("Constellation state file written to %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename)) + a.log.Debug(fmt.Sprintf("Constellation state file written to %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename))) if !mergeConfig { fmt.Fprintln(wr, "You can now connect to your cluster by executing:") diff --git a/cli/internal/cmd/applyterraform.go b/cli/internal/cmd/applyterraform.go index 3dc0048b8e..668ad0eeda 100644 --- a/cli/internal/cmd/applyterraform.go +++ b/cli/internal/cmd/applyterraform.go @@ -23,7 +23,7 @@ import ( // runTerraformApply checks if changes to Terraform are required and applies them. 
func (a *applyCmd) runTerraformApply(cmd *cobra.Command, conf *config.Config, stateFile *state.State, upgradeDir string) error { - a.log.Debugf("Checking if Terraform migrations are required") + a.log.Debug("Checking if Terraform migrations are required") terraformClient, removeClient, err := a.newInfraApplier(cmd.Context()) if err != nil { return fmt.Errorf("creating Terraform client: %w", err) @@ -39,18 +39,18 @@ func (a *applyCmd) runTerraformApply(cmd *cobra.Command, conf *config.Config, st if changesRequired, err := a.planTerraformChanges(cmd, conf, terraformClient); err != nil { return fmt.Errorf("planning Terraform migrations: %w", err) } else if !changesRequired { - a.log.Debugf("No changes to infrastructure required, skipping Terraform migrations") + a.log.Debug("No changes to infrastructure required, skipping Terraform migrations") return nil } - a.log.Debugf("Apply new Terraform resources for infrastructure changes") + a.log.Debug("Apply new Terraform resources for infrastructure changes") newInfraState, err := a.applyTerraformChanges(cmd, conf, terraformClient, upgradeDir, isNewCluster) if err != nil { return err } // Merge the original state with the new infrastructure values - a.log.Debugf("Updating state file with new infrastructure state") + a.log.Debug("Updating state file with new infrastructure state") if _, err := stateFile.Merge( // temporary state with new infrastructure values state.New().SetInfrastructure(newInfraState), @@ -68,7 +68,7 @@ func (a *applyCmd) runTerraformApply(cmd *cobra.Command, conf *config.Config, st // planTerraformChanges checks if any changes to the Terraform state are required. // If no state exists, this function will return true and the caller should create a new state. func (a *applyCmd) planTerraformChanges(cmd *cobra.Command, conf *config.Config, terraformClient cloudApplier) (bool, error) { - a.log.Debugf("Planning Terraform changes") + a.log.Debug("Planning Terraform changes") // Check if there are any Terraform changes to apply @@ -76,7 +76,7 @@ func (a *applyCmd) planTerraformChanges(cmd *cobra.Command, conf *config.Config, // // var manualMigrations []terraform.StateMigration // for _, migration := range manualMigrations { - // u.log.Debugf("Adding manual Terraform migration: %s", migration.DisplayName) + // u.log.Debug(fmt.Sprintf("Adding manual Terraform migration: %s", migration.DisplayName)) // u.infraApplier.AddManualStateMigration(migration) // } @@ -146,7 +146,7 @@ func (a *applyCmd) applyTerraformChangesWithMessage( return state.Infrastructure{}, errors.New(abortErrorMsg) } } - a.log.Debugf("Applying Terraform changes") + a.log.Debug("Applying Terraform changes") a.spinner.Start(progressMsg, false) infraState, err := terraformClient.Apply(cmd.Context(), csp, attestation, rollbackBehavior) @@ -186,7 +186,7 @@ func printCreateInfo(out io.Writer, conf *config.Config, log debugLog) error { } } if len(otherGroupNames) > 0 { - log.Debugf("Creating %d additional node groups: %v", len(otherGroupNames), otherGroupNames) + log.Debug(fmt.Sprintf("Creating %d additional node groups: %v", len(otherGroupNames), otherGroupNames)) } fmt.Fprintf(out, "The following Constellation cluster will be created:\n") diff --git a/cli/internal/cmd/configfetchmeasurements.go b/cli/internal/cmd/configfetchmeasurements.go index aaa5b1cf06..04af8632ce 100644 --- a/cli/internal/cmd/configfetchmeasurements.go +++ b/cli/internal/cmd/configfetchmeasurements.go @@ -93,7 +93,6 @@ func runConfigFetchMeasurements(cmd *cobra.Command, _ []string) error { if err != nil 
{ return fmt.Errorf("creating logger: %w", err) } - defer log.Sync() fileHandler := file.NewHandler(afero.NewOsFs()) rekor, err := sigstore.NewRekor() if err != nil { @@ -105,7 +104,7 @@ func runConfigFetchMeasurements(cmd *cobra.Command, _ []string) error { if err := cfm.flags.parse(cmd.Flags()); err != nil { return fmt.Errorf("parsing flags: %w", err) } - cfm.log.Debugf("Using flags %+v", cfm.flags) + cfm.log.Debug(fmt.Sprintf("Using flags %+v", cfm.flags)) fetcher := attestationconfigapi.NewFetcherWithClient(http.DefaultClient, constants.CDNRepositoryURL) return cfm.configFetchMeasurements(cmd, fileHandler, fetcher) @@ -119,7 +118,7 @@ func (cfm *configFetchMeasurementsCmd) configFetchMeasurements( return errors.New("fetching measurements is not supported") } - cfm.log.Debugf("Loading configuration file from %q", cfm.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename)) + cfm.log.Debug(fmt.Sprintf("Loading configuration file from %q", cfm.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))) conf, err := config.New(fileHandler, constants.ConfigFilename, fetcher, cfm.flags.force) var configValidationErr *config.ValidationError @@ -134,11 +133,11 @@ func (cfm *configFetchMeasurementsCmd) configFetchMeasurements( cmd.PrintErrln("Configured image doesn't look like a released production image. Double check image before deploying to production.") } - cfm.log.Debugf("Creating context") + cfm.log.Debug("Creating context") ctx, cancel := context.WithTimeout(context.Background(), time.Minute) defer cancel() - cfm.log.Debugf("Updating URLs") + cfm.log.Debug("Updating URLs") if err := cfm.flags.updateURLs(conf); err != nil { return err } @@ -153,14 +152,14 @@ func (cfm *configFetchMeasurementsCmd) configFetchMeasurements( return fmt.Errorf("fetching and verifying measurements: %w", err) } } - cfm.log.Debugf("Measurements: %#v\n", fetchedMeasurements) + cfm.log.Debug(fmt.Sprintf("Measurements: %#v\n", fetchedMeasurements)) - cfm.log.Debugf("Updating measurements in configuration") + cfm.log.Debug("Updating measurements in configuration") conf.UpdateMeasurements(fetchedMeasurements) if err := fileHandler.WriteYAML(constants.ConfigFilename, conf, file.OptOverwrite); err != nil { return err } - cfm.log.Debugf("Configuration written to %s", cfm.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename)) + cfm.log.Debug(fmt.Sprintf("Configuration written to %s", cfm.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))) cmd.Print("Successfully fetched measurements and updated Configuration\n") return nil } diff --git a/cli/internal/cmd/configgenerate.go b/cli/internal/cmd/configgenerate.go index f7858221b0..666b7284d5 100644 --- a/cli/internal/cmd/configgenerate.go +++ b/cli/internal/cmd/configgenerate.go @@ -77,7 +77,6 @@ func runConfigGenerate(cmd *cobra.Command, args []string) error { if err != nil { return fmt.Errorf("creating logger: %w", err) } - defer log.Sync() fileHandler := file.NewHandler(afero.NewOsFs()) provider := cloudprovider.FromString(args[0]) @@ -86,13 +85,13 @@ func runConfigGenerate(cmd *cobra.Command, args []string) error { if err := cg.flags.parse(cmd.Flags()); err != nil { return fmt.Errorf("parsing flags: %w", err) } - log.Debugf("Parsed flags as %+v", cg.flags) + log.Debug(fmt.Sprintf("Parsed flags as %+v", cg.flags)) return cg.configGenerate(cmd, fileHandler, provider, args[0]) } func (cg *configGenerateCmd) configGenerate(cmd *cobra.Command, fileHandler file.Handler, provider cloudprovider.Provider, rawProvider string) error { - 
cg.log.Debugf("Using cloud provider %s", provider.String()) + cg.log.Debug(fmt.Sprintf("Using cloud provider %s", provider.String())) // Config creation conf, err := createConfigWithAttestationVariant(provider, rawProvider, cg.flags.attestationVariant) @@ -100,7 +99,7 @@ func (cg *configGenerateCmd) configGenerate(cmd *cobra.Command, fileHandler file return fmt.Errorf("creating config: %w", err) } conf.KubernetesVersion = cg.flags.k8sVersion - cg.log.Debugf("Writing YAML data to configuration file") + cg.log.Debug("Writing YAML data to configuration file") if err := fileHandler.WriteYAML(constants.ConfigFilename, conf, file.OptMkdirAll); err != nil { return fmt.Errorf("writing config file: %w", err) } diff --git a/cli/internal/cmd/iamcreate.go b/cli/internal/cmd/iamcreate.go index 337bf5f863..4067b33b06 100644 --- a/cli/internal/cmd/iamcreate.go +++ b/cli/internal/cmd/iamcreate.go @@ -82,7 +82,6 @@ func runIAMCreate(cmd *cobra.Command, providerCreator providerIAMCreator, provid if err != nil { return fmt.Errorf("creating logger: %w", err) } - defer log.Sync() iamCreator := &iamCreator{ cmd: cmd, @@ -134,7 +133,7 @@ func (c *iamCreator) create(ctx context.Context) error { var conf config.Config if c.flags.updateConfig { - c.log.Debugf("Parsing config %s", c.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename)) + c.log.Debug(fmt.Sprintf("Parsing config %s", c.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))) if err := c.fileHandler.ReadYAML(constants.ConfigFilename, &conf); err != nil { return fmt.Errorf("error reading the configuration file: %w", err) } @@ -154,7 +153,7 @@ func (c *iamCreator) create(ctx context.Context) error { return err } c.cmd.Println() // Print empty line to separate after spinner ended. - c.log.Debugf("Successfully created the IAM cloud resources") + c.log.Debug("Successfully created the IAM cloud resources") err = c.providerCreator.parseAndWriteIDFile(iamFile, c.fileHandler) if err != nil { @@ -162,7 +161,7 @@ func (c *iamCreator) create(ctx context.Context) error { } if c.flags.updateConfig { - c.log.Debugf("Writing IAM configuration to %s", c.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename)) + c.log.Debug(fmt.Sprintf("Writing IAM configuration to %s", c.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))) c.providerCreator.writeOutputValuesToConfig(&conf, iamFile) if err := c.fileHandler.WriteYAML(constants.ConfigFilename, conf, file.OptOverwrite); err != nil { return err diff --git a/cli/internal/cmd/iamdestroy.go b/cli/internal/cmd/iamdestroy.go index 667218b810..f89c939a52 100644 --- a/cli/internal/cmd/iamdestroy.go +++ b/cli/internal/cmd/iamdestroy.go @@ -58,7 +58,6 @@ func runIAMDestroy(cmd *cobra.Command, _ []string) error { if err != nil { return fmt.Errorf("creating logger: %w", err) } - defer log.Sync() spinner := newSpinner(cmd.ErrOrStderr()) destroyer := cloudcmd.NewIAMDestroyer() fsHandler := file.NewHandler(afero.NewOsFs()) @@ -78,25 +77,25 @@ type destroyCmd struct { func (c *destroyCmd) iamDestroy(cmd *cobra.Command, spinner spinnerInterf, destroyer iamDestroyer, fsHandler file.Handler) error { // check if there is a possibility that the cluster is still running by looking out for specific files - c.log.Debugf("Checking if %q exists", c.flags.pathPrefixer.PrefixPrintablePath(constants.AdminConfFilename)) + c.log.Debug(fmt.Sprintf("Checking if %q exists", c.flags.pathPrefixer.PrefixPrintablePath(constants.AdminConfFilename))) if _, err := fsHandler.Stat(constants.AdminConfFilename); 
!errors.Is(err, os.ErrNotExist) { return fmt.Errorf("file %q still exists, please make sure to terminate your cluster before destroying your IAM configuration", c.flags.pathPrefixer.PrefixPrintablePath(constants.AdminConfFilename)) } - c.log.Debugf("Checking if %q exists", c.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename)) + c.log.Debug(fmt.Sprintf("Checking if %q exists", c.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename))) if _, err := fsHandler.Stat(constants.StateFilename); !errors.Is(err, os.ErrNotExist) { return fmt.Errorf("file %q still exists, please make sure to terminate your cluster before destroying your IAM configuration", c.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename)) } gcpFileExists := false - c.log.Debugf("Checking if %q exists", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename)) + c.log.Debug(fmt.Sprintf("Checking if %q exists", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename))) if _, err := fsHandler.Stat(constants.GCPServiceAccountKeyFilename); err != nil { if !errors.Is(err, os.ErrNotExist) { return err } } else { - c.log.Debugf("%q exists", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename)) + c.log.Debug(fmt.Sprintf("%q exists", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename))) gcpFileExists = true } @@ -117,7 +116,7 @@ func (c *destroyCmd) iamDestroy(cmd *cobra.Command, spinner spinnerInterf, destr } if gcpFileExists { - c.log.Debugf("Starting to delete %q", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename)) + c.log.Debug(fmt.Sprintf("Starting to delete %q", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename))) proceed, err := c.deleteGCPServiceAccountKeyFile(cmd, destroyer, fsHandler) if err != nil { return err @@ -128,7 +127,7 @@ func (c *destroyCmd) iamDestroy(cmd *cobra.Command, spinner spinnerInterf, destr } } - c.log.Debugf("Starting to destroy IAM configuration") + c.log.Debug("Starting to destroy IAM configuration") spinner.Start("Destroying IAM configuration", false) defer spinner.Stop() @@ -144,18 +143,18 @@ func (c *destroyCmd) iamDestroy(cmd *cobra.Command, spinner spinnerInterf, destr func (c *destroyCmd) deleteGCPServiceAccountKeyFile(cmd *cobra.Command, destroyer iamDestroyer, fsHandler file.Handler) (bool, error) { var fileSaKey gcpshared.ServiceAccountKey - c.log.Debugf("Parsing %q", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename)) + c.log.Debug(fmt.Sprintf("Parsing %q", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename))) if err := fsHandler.ReadJSON(constants.GCPServiceAccountKeyFilename, &fileSaKey); err != nil { return false, err } - c.log.Debugf("Getting service account key from the tfstate") + c.log.Debug("Getting service account key from the tfstate") tfSaKey, err := destroyer.GetTfStateServiceAccountKey(cmd.Context(), constants.TerraformIAMWorkingDir) if err != nil { return false, err } - c.log.Debugf("Checking if keys are the same") + c.log.Debug("Checking if keys are the same") if tfSaKey != fileSaKey { cmd.Printf( "The key in %q don't match up with your Terraform state. 
%q will not be deleted.\n", @@ -169,6 +168,6 @@ func (c *destroyCmd) deleteGCPServiceAccountKeyFile(cmd *cobra.Command, destroye return false, err } - c.log.Debugf("Successfully deleted %q", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename)) + c.log.Debug(fmt.Sprintf("Successfully deleted %q", c.flags.pathPrefixer.PrefixPrintablePath(constants.GCPServiceAccountKeyFilename))) return true, nil } diff --git a/cli/internal/cmd/iamupgradeapply.go b/cli/internal/cmd/iamupgradeapply.go index 7f1e98544d..0a3485d279 100644 --- a/cli/internal/cmd/iamupgradeapply.go +++ b/cli/internal/cmd/iamupgradeapply.go @@ -149,7 +149,7 @@ func (i iamUpgradeApplyCmd) iamUpgradeApply(cmd *cobra.Command, iamUpgrader iamU return errors.New("IAM upgrade aborted by user") } } - i.log.Debugf("Applying Terraform IAM migrations") + i.log.Debug("Applying Terraform IAM migrations") if err := iamUpgrader.ApplyIAMUpgrade(cmd.Context(), conf.GetProvider()); err != nil { return fmt.Errorf("applying terraform migrations: %w", err) } diff --git a/cli/internal/cmd/init.go b/cli/internal/cmd/init.go index 166195d492..8075db9017 100644 --- a/cli/internal/cmd/init.go +++ b/cli/internal/cmd/init.go @@ -72,7 +72,7 @@ func (c *kubeconfigMerger) mergeConfigs(configPath string, fileHandler file.Hand clientcmd.RecommendedHomeFile, configPath, // our config should overwrite the default config } - c.log.Debugf("Kubeconfig file loading precedence: %v", loadingRules.Precedence) + c.log.Debug(fmt.Sprintf("Kubeconfig file loading precedence: %v", loadingRules.Precedence)) // merge the kubeconfigs cfg, err := loadingRules.Load() @@ -82,7 +82,7 @@ func (c *kubeconfigMerger) mergeConfigs(configPath string, fileHandler file.Hand // Set the current context to the cluster we just created cfg.CurrentContext = constellConfig.CurrentContext - c.log.Debugf("Set current context to %s", cfg.CurrentContext) + c.log.Debug(fmt.Sprintf("Set current context to %s", cfg.CurrentContext)) json, err := runtime.Encode(clientcodec.Codec, cfg) if err != nil { @@ -97,7 +97,7 @@ func (c *kubeconfigMerger) mergeConfigs(configPath string, fileHandler file.Hand if err := fileHandler.Write(clientcmd.RecommendedHomeFile, mergedKubeconfig, file.OptOverwrite); err != nil { return fmt.Errorf("writing merged kubeconfig to file: %w", err) } - c.log.Debugf("Merged kubeconfig into default config file: %s", clientcmd.RecommendedHomeFile) + c.log.Debug(fmt.Sprintf("Merged kubeconfig into default config file: %s", clientcmd.RecommendedHomeFile)) return nil } diff --git a/cli/internal/cmd/license_enterprise.go b/cli/internal/cmd/license_enterprise.go index 6ceed9ddea..79ae2bf7c0 100644 --- a/cli/internal/cmd/license_enterprise.go +++ b/cli/internal/cmd/license_enterprise.go @@ -24,7 +24,7 @@ import ( // is used. It is a no-op in the open source version of Constellation. 
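
The warnLog contract rewritten in apply.go above changes shape the same way: implementations take a message plus optional key-value arguments instead of a format string. Below is a minimal sketch of an adapter satisfying the new interface; stderrWarner is hypothetical, but it mirrors the warnLogger behavior of demoting Info to Debug (so validator chatter stays hidden without --debug) and printing warnings to stderr:

    package main

    import (
        "fmt"
        "log/slog"
        "os"
    )

    type warnLog interface {
        Warn(msg string, args ...any)
        Info(msg string, args ...any)
    }

    // stderrWarner is a hypothetical stand-in for the CLI's warnLogger:
    // infos are demoted to debug logs, warnings go straight to stderr.
    type stderrWarner struct{ log *slog.Logger }

    func (w stderrWarner) Info(msg string, args ...any) { w.log.Debug(msg, args...) }

    func (w stderrWarner) Warn(msg string, args ...any) {
        fmt.Fprintf(os.Stderr, "Warning: %s %s\n", msg, fmt.Sprint(args...))
    }

    func main() {
        var wl warnLog = stderrWarner{log: slog.Default()}
        wl.Warn("untrusted PCR value", "index", 8)
    }

Note that fmt.Sprint(args...) merely stringifies the remaining arguments; any printf verbs left in a caller's message would now print literally, which is why the callers are converted together with the interface.
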
func (a *applyCmd) checkLicenseFile(cmd *cobra.Command, csp cloudprovider.Provider) { var licenseID string - a.log.Debugf("Running license check") + a.log.Debug("Running license check") readBytes, err := a.fileHandler.Read(constants.LicenseFilename) if errors.Is(err, fs.ErrNotExist) { @@ -52,5 +52,5 @@ func (a *applyCmd) checkLicenseFile(cmd *cobra.Command, csp cloudprovider.Provid cmd.Printf("Please keep your vCPU quota (%d) in mind.\n", quota) } - a.log.Debugf("Checked license") + a.log.Debug("Checked license") } diff --git a/cli/internal/cmd/log.go b/cli/internal/cmd/log.go index 463e1f5b6d..d86f1686ff 100644 --- a/cli/internal/cmd/log.go +++ b/cli/internal/cmd/log.go @@ -7,25 +7,25 @@ SPDX-License-Identifier: AGPL-3.0-only package cmd import ( + "log/slog" + "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/spf13/cobra" - "go.uber.org/zap/zapcore" ) type debugLog interface { - Debugf(format string, args ...any) - Sync() + Debug(msg string, args ...any) } func newCLILogger(cmd *cobra.Command) (debugLog, error) { - logLvl := zapcore.InfoLevel + logLvl := slog.LevelInfo debugLog, err := cmd.Flags().GetBool("debug") if err != nil { return nil, err } if debugLog { - logLvl = zapcore.DebugLevel + logLvl = slog.LevelDebug } - return logger.New(logger.PlainLog, logLvl), nil + return logger.NewTextLogger(logLvl), nil } diff --git a/cli/internal/cmd/maapatch.go b/cli/internal/cmd/maapatch.go index 7db1abb5c5..e5bea9f699 100644 --- a/cli/internal/cmd/maapatch.go +++ b/cli/internal/cmd/maapatch.go @@ -47,7 +47,6 @@ func runPatchMAA(cmd *cobra.Command, args []string) error { if err != nil { return fmt.Errorf("creating logger: %w", err) } - defer log.Sync() p := maa.NewAzurePolicyPatcher() @@ -57,7 +56,7 @@ func runPatchMAA(cmd *cobra.Command, args []string) error { } func (c *maaPatchCmd) patchMAA(cmd *cobra.Command, attestationURL string) error { - c.log.Debugf("Using attestation URL %s", attestationURL) + c.log.Debug(fmt.Sprintf("Using attestation URL %s", attestationURL)) if err := c.patcher.Patch(cmd.Context(), attestationURL); err != nil { return fmt.Errorf("patching MAA attestation policy: %w", err) diff --git a/cli/internal/cmd/miniup.go b/cli/internal/cmd/miniup.go index ffe254e908..dfd297d934 100644 --- a/cli/internal/cmd/miniup.go +++ b/cli/internal/cmd/miniup.go @@ -50,7 +50,6 @@ func runUp(cmd *cobra.Command, _ []string) error { if err != nil { return fmt.Errorf("creating logger: %w", err) } - defer log.Sync() m := &miniUpCmd{ log: log, @@ -152,7 +151,7 @@ func (m *miniUpCmd) prepareConfig(cmd *cobra.Command) (*config.Config, error) { if err != nil { return nil, fmt.Errorf("mini default config is invalid: %v", err) } - m.log.Debugf("Prepared configuration") + m.log.Debug("Prepared configuration") return config, m.fileHandler.WriteYAML(constants.ConfigFilename, config, file.OptOverwrite) } diff --git a/cli/internal/cmd/miniup_linux_amd64.go b/cli/internal/cmd/miniup_linux_amd64.go index a28fe1486f..c9885d801e 100644 --- a/cli/internal/cmd/miniup_linux_amd64.go +++ b/cli/internal/cmd/miniup_linux_amd64.go @@ -32,12 +32,12 @@ func (m *miniUpCmd) checkSystemRequirements(out io.Writer) error { return fmt.Errorf("creation of a QEMU based Constellation is not supported for %s/%s, a linux/amd64 platform is required", runtime.GOOS, runtime.GOARCH) } - m.log.Debugf("Checked arch and os") + m.log.Debug("Checked arch and os") // check if /dev/kvm exists if _, err := os.Stat("/dev/kvm"); err != nil { return fmt.Errorf("unable to access KVM device: %w", err) } - 
m.log.Debugf("Checked that /dev/kvm exists") + m.log.Debug("Checked that /dev/kvm exists") // check CPU cores if runtime.NumCPU() < 4 { return fmt.Errorf("insufficient CPU cores: %d, at least 4 cores are required by MiniConstellation", runtime.NumCPU()) @@ -45,7 +45,7 @@ func (m *miniUpCmd) checkSystemRequirements(out io.Writer) error { if runtime.NumCPU() < 6 { fmt.Fprintf(out, "WARNING: Only %d CPU cores available. This may cause performance issues.\n", runtime.NumCPU()) } - m.log.Debugf("Checked CPU cores - there are %d", runtime.NumCPU()) + m.log.Debug(fmt.Sprintf("Checked CPU cores - there are %d", runtime.NumCPU())) // check memory f, err := os.Open("/proc/meminfo") @@ -63,7 +63,7 @@ func (m *miniUpCmd) checkSystemRequirements(out io.Writer) error { } } } - m.log.Debugf("Scanned for available memory") + m.log.Debug("Scanned for available memory") memGB := memKB / 1024 / 1024 if memGB < 4 { return fmt.Errorf("insufficient memory: %dGB, at least 4GB of memory are required by MiniConstellation", memGB) @@ -71,7 +71,7 @@ func (m *miniUpCmd) checkSystemRequirements(out io.Writer) error { if memGB < 6 { fmt.Fprintln(out, "WARNING: Less than 6GB of memory available. This may cause performance issues.") } - m.log.Debugf("Checked available memory, you have %dGB available", memGB) + m.log.Debug(fmt.Sprintf("Checked available memory, you have %dGB available", memGB)) var stat unix.Statfs_t if err := unix.Statfs(".", &stat); err != nil { @@ -81,7 +81,7 @@ func (m *miniUpCmd) checkSystemRequirements(out io.Writer) error { if freeSpaceGB < 20 { return fmt.Errorf("insufficient disk space: %dGB, at least 20GB of disk space are required by MiniConstellation", freeSpaceGB) } - m.log.Debugf("Checked for free space available, you have %dGB available", freeSpaceGB) + m.log.Debug(fmt.Sprintf("Checked for free space available, you have %dGB available", freeSpaceGB)) return nil } diff --git a/cli/internal/cmd/recover.go b/cli/internal/cmd/recover.go index d66aa69b3a..f3efc3e969 100644 --- a/cli/internal/cmd/recover.go +++ b/cli/internal/cmd/recover.go @@ -76,7 +76,6 @@ func runRecover(cmd *cobra.Command, _ []string) error { if err != nil { return fmt.Errorf("creating logger: %w", err) } - defer log.Sync() fileHandler := file.NewHandler(afero.NewOsFs()) newDialer := func(validator atls.Validator) *dialer.Dialer { return dialer.New(nil, validator, &net.Dialer{}) @@ -85,7 +84,7 @@ func runRecover(cmd *cobra.Command, _ []string) error { if err := r.flags.parse(cmd.Flags()); err != nil { return err } - r.log.Debugf("Using flags: %+v", r.flags) + r.log.Debug(fmt.Sprintf("Using flags: %+v", r.flags)) return r.recover(cmd, fileHandler, 5*time.Second, &recoverDoer{log: r.log}, newDialer) } @@ -94,12 +93,12 @@ func (r *recoverCmd) recover( doer recoverDoerInterface, newDialer func(validator atls.Validator) *dialer.Dialer, ) error { var masterSecret uri.MasterSecret - r.log.Debugf("Loading master secret file from %s", r.flags.pathPrefixer.PrefixPrintablePath(constants.MasterSecretFilename)) + r.log.Debug(fmt.Sprintf("Loading master secret file from %s", r.flags.pathPrefixer.PrefixPrintablePath(constants.MasterSecretFilename))) if err := fileHandler.ReadJSON(constants.MasterSecretFilename, &masterSecret); err != nil { return err } - r.log.Debugf("Loading configuration file from %q", r.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename)) + r.log.Debug(fmt.Sprintf("Loading configuration file from %q", r.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))) conf, err := config.New(fileHandler, 
constants.ConfigFilename, r.configFetcher, r.flags.force) var configValidationErr *config.ValidationError if errors.As(err, &configValidationErr) { @@ -109,7 +108,7 @@ func (r *recoverCmd) recover( return err } - r.log.Debugf("Got provider %s", conf.GetProvider()) + r.log.Debug(fmt.Sprintf("Got provider %s", conf.GetProvider())) if conf.GetProvider() == cloudprovider.Azure { interval = 20 * time.Second // Azure LB takes a while to remove unhealthy instances } @@ -130,16 +129,16 @@ func (r *recoverCmd) recover( conf.UpdateMAAURL(stateFile.Infrastructure.Azure.AttestationURL) } - r.log.Debugf("Creating aTLS Validator for %s", conf.GetAttestationConfig().GetVariant()) + r.log.Debug(fmt.Sprintf("Creating aTLS Validator for %s", conf.GetAttestationConfig().GetVariant())) validator, err := choose.Validator(conf.GetAttestationConfig(), warnLogger{cmd: cmd, log: r.log}) if err != nil { return fmt.Errorf("creating new validator: %w", err) } - r.log.Debugf("Created a new validator") + r.log.Debug("Created a new validator") doer.setDialer(newDialer(validator), endpoint) - r.log.Debugf("Set dialer for endpoint %s", endpoint) + r.log.Debug(fmt.Sprintf("Set dialer for endpoint %s", endpoint)) doer.setURIs(masterSecret.EncodeToURI(), uri.NoStoreURI) - r.log.Debugf("Set secrets") + r.log.Debug("Set secrets") if err := r.recoverCall(cmd.Context(), cmd.OutOrStdout(), interval, doer); err != nil { if grpcRetry.ServiceIsUnavailable(err) { return nil @@ -167,12 +166,12 @@ func (r *recoverCmd) recoverCall(ctx context.Context, out io.Writer, interval ti }) } - r.log.Debugf("Encountered error (retriable: %t): %s", retry, err) + r.log.Debug(fmt.Sprintf("Encountered error (retriable: %t): %s", retry, err)) return retry } retrier := retry.NewIntervalRetrier(doer, interval, retryOnceOnFailure) - r.log.Debugf("Created new interval retrier") + r.log.Debug("Created new interval retrier") err = retrier.Do(ctx) if err != nil { break @@ -180,7 +179,7 @@ func (r *recoverCmd) recoverCall(ctx context.Context, out io.Writer, interval ti fmt.Fprintln(out, "Pushed recovery key.") ctr++ } - r.log.Debugf("Retry counter is %d", ctr) + r.log.Debug(fmt.Sprintf("Retry counter is %d", ctr)) if ctr > 0 { fmt.Fprintf(out, "Recovered %d control-plane nodes.\n", ctr) } else if grpcRetry.ServiceIsUnavailable(err) { @@ -222,11 +221,11 @@ func (d *recoverDoer) Do(ctx context.Context) (retErr error) { if err != nil { return fmt.Errorf("dialing recovery server: %w", err) } - d.log.Debugf("Dialed recovery server") + d.log.Debug("Dialed recovery server") defer conn.Close() protoClient := recoverproto.NewAPIClient(conn) - d.log.Debugf("Created protoClient") + d.log.Debug("Created protoClient") req := &recoverproto.RecoverMessage{ KmsUri: d.kmsURI, @@ -238,7 +237,7 @@ func (d *recoverDoer) Do(ctx context.Context) (retErr error) { return fmt.Errorf("calling recover: %w", err) } - d.log.Debugf("Received confirmation") + d.log.Debug("Received confirmation") return nil } diff --git a/cli/internal/cmd/status.go b/cli/internal/cmd/status.go index 73f24ffbac..c2e83ef3ac 100644 --- a/cli/internal/cmd/status.go +++ b/cli/internal/cmd/status.go @@ -43,7 +43,6 @@ func runStatus(cmd *cobra.Command, _ []string) error { if err != nil { return fmt.Errorf("creating logger: %w", err) } - defer log.Sync() fileHandler := file.NewHandler(afero.NewOsFs()) diff --git a/cli/internal/cmd/upgradecheck.go b/cli/internal/cmd/upgradecheck.go index 4916480747..74ec31e089 100644 --- a/cli/internal/cmd/upgradecheck.go +++ b/cli/internal/cmd/upgradecheck.go @@ -92,7 +92,6 @@ func 
runUpgradeCheck(cmd *cobra.Command, _ []string) error { if err != nil { return fmt.Errorf("creating logger: %w", err) } - defer log.Sync() var flags upgradeCheckFlags if err := flags.parse(cmd.Flags()); err != nil { @@ -188,7 +187,7 @@ func (u *upgradeCheckCmd) upgradeCheck(cmd *cobra.Command, fetcher attestationco // get current image version of the cluster csp := conf.GetProvider() attestationVariant := conf.GetAttestationConfig().GetVariant() - u.log.Debugf("Using provider %s with attestation variant %s", csp.String(), attestationVariant.String()) + u.log.Debug(fmt.Sprintf("Using provider %s with attestation variant %s", csp.String(), attestationVariant.String())) current, err := u.collect.currentVersions(cmd.Context()) if err != nil { @@ -199,18 +198,18 @@ func (u *upgradeCheckCmd) upgradeCheck(cmd *cobra.Command, fetcher attestationco if err != nil { return err } - u.log.Debugf("Current cli version: %s", current.cli) - u.log.Debugf("Supported cli version(s): %s", supported.cli) - u.log.Debugf("Current service version: %s", current.service) - u.log.Debugf("Supported service version: %s", supported.service) - u.log.Debugf("Current k8s version: %s", current.k8s) - u.log.Debugf("Supported k8s version(s): %s", supported.k8s) + u.log.Debug(fmt.Sprintf("Current cli version: %s", current.cli)) + u.log.Debug(fmt.Sprintf("Supported cli version(s): %s", supported.cli)) + u.log.Debug(fmt.Sprintf("Current service version: %s", current.service)) + u.log.Debug(fmt.Sprintf("Supported service version: %s", supported.service)) + u.log.Debug(fmt.Sprintf("Current k8s version: %s", current.k8s)) + u.log.Debug(fmt.Sprintf("Supported k8s version(s): %s", supported.k8s)) // Filter versions to only include upgrades newServices := supported.service if err := supported.service.IsUpgradeTo(current.service); err != nil { newServices = consemver.Semver{} - u.log.Debugf("No valid service upgrades are available from %q to %q. The minor version can only drift by 1.\n", current.service.String(), supported.service.String()) + u.log.Debug(fmt.Sprintf("No valid service upgrades are available from %q to %q. 
The minor version can only drift by 1.\n", current.service.String(), supported.service.String())) } newKubernetes := filterK8sUpgrades(current.k8s, supported.k8s) @@ -222,13 +221,13 @@ func (u *upgradeCheckCmd) upgradeCheck(cmd *cobra.Command, fetcher attestationco return err } - u.log.Debugf("Planning Terraform migrations") + u.log.Debug("Planning Terraform migrations") // Add manual migrations here if required // // var manualMigrations []terraform.StateMigration // for _, migration := range manualMigrations { - // u.log.Debugf("Adding manual Terraform migration: %s", migration.DisplayName) + // u.log.Debug("Adding manual Terraform migration: %s", migration.DisplayName) // u.terraformChecker.AddManualStateMigration(migration) // } cmd.Println("The following Terraform migrations are available with this CLI:") @@ -344,7 +343,7 @@ func (v *versionCollector) newMeasurements(ctx context.Context, csp cloudprovide // get expected measurements for each image upgrades := make(map[string]measurements.M) for _, version := range versions { - v.log.Debugf("Fetching measurements for image: %s", version) + v.log.Debug(fmt.Sprintf("Fetching measurements for image: %s", version.Version())) shortPath := version.ShortPath() publicKey, err := keyselect.CosignPublicKeyForVersion(version) @@ -365,7 +364,7 @@ func (v *versionCollector) newMeasurements(ctx context.Context, csp cloudprovide } upgrades[shortPath] = measurements } - v.log.Debugf("Compatible image measurements are %v", upgrades) + v.log.Debug(fmt.Sprintf("Compatible image measurements are %v", upgrades)) return upgrades, nil } @@ -453,9 +452,9 @@ func (v *versionCollector) newImages(ctx context.Context, currentImageVersion co if err != nil { return nil, fmt.Errorf("calculating next image minor version: %w", err) } - v.log.Debugf("Current image minor version is %s", currentImageMinorVer) - v.log.Debugf("Current CLI minor version is %s", currentCLIMinorVer) - v.log.Debugf("Next image minor version is %s", nextImageMinorVer) + v.log.Debug(fmt.Sprintf("Current image minor version is %s", currentImageMinorVer)) + v.log.Debug(fmt.Sprintf("Current CLI minor version is %s", currentCLIMinorVer)) + v.log.Debug(fmt.Sprintf("Next image minor version is %s", nextImageMinorVer)) allowedMinorVersions := []string{currentImageMinorVer, nextImageMinorVer} switch cliImageCompare := semver.Compare(currentCLIMinorVer, currentImageMinorVer); { @@ -471,7 +470,7 @@ func (v *versionCollector) newImages(ctx context.Context, currentImageVersion co case cliImageCompare > 0: allowedMinorVersions = []string{currentImageMinorVer, nextImageMinorVer} } - v.log.Debugf("Allowed minor versions are %#v", allowedMinorVersions) + v.log.Debug(fmt.Sprintf("Allowed minor versions are %#v", allowedMinorVersions)) newerImages, err := v.newerVersions(ctx, allowedMinorVersions) if err != nil { @@ -494,7 +493,7 @@ func (v *versionCollector) newerVersions(ctx context.Context, allowedVersions [] patchList, err := v.verListFetcher.FetchVersionList(ctx, patchList) var notFound *fetcher.NotFoundError if errors.As(err, &notFound) { - v.log.Debugf("Skipping version: %s", err) + v.log.Debug(fmt.Sprintf("Skipping version: %s", err)) continue } if err != nil { @@ -502,7 +501,7 @@ func (v *versionCollector) newerVersions(ctx context.Context, allowedVersions [] } updateCandidates = append(updateCandidates, patchList.StructuredVersions()...)
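
The newerVersions hunk above also shows how the conversion interacts with typed error handling: the errors.As sentinel check is untouched, only the log call changes. A compact, runnable sketch of that skip-on-not-found pattern, with notFoundError standing in for the real fetcher.NotFoundError:

    package main

    import (
        "errors"
        "fmt"
        "log/slog"
    )

    // notFoundError stands in for the fetcher.NotFoundError used above.
    type notFoundError struct{ ref string }

    func (e *notFoundError) Error() string { return e.ref + " not found" }

    // fetchVersionList is a dummy fetcher; one minor version has no list.
    func fetchVersionList(minor string) ([]string, error) {
        if minor == "v2.9" {
            return nil, fmt.Errorf("fetching list: %w", &notFoundError{ref: minor})
        }
        return []string{minor + ".1", minor + ".2"}, nil
    }

    func main() {
        var updateCandidates []string
        for _, minor := range []string{"v2.9", "v2.10"} {
            list, err := fetchVersionList(minor)
            var notFound *notFoundError
            if errors.As(err, &notFound) {
                slog.Debug(fmt.Sprintf("Skipping version: %s", err))
                continue
            }
            if err != nil {
                slog.Error("fetching version list", "error", err)
                return
            }
            updateCandidates = append(updateCandidates, list...)
        }
        slog.Debug(fmt.Sprintf("Update candidates are %v", updateCandidates))
    }
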
} - v.log.Debugf("Update candidates are %v", updateCandidates) + v.log.Debug(fmt.Sprintf("Update candidates are %v", updateCandidates)) return updateCandidates, nil } @@ -604,7 +603,7 @@ func getCompatibleImageMeasurements(ctx context.Context, writer io.Writer, clien } var fetchedMeasurements measurements.M - log.Debugf("Fetching for measurement url: %s", measurementsURL) + log.Debug(fmt.Sprintf("Fetching for measurement url: %s", measurementsURL)) hash, err := fetchedMeasurements.FetchAndVerify( ctx, client, cosign, @@ -658,7 +657,7 @@ func (v *versionCollector) newCLIVersions(ctx context.Context) ([]consemver.Semv return nil, fmt.Errorf("parsing version %s: %w", version, err) } if err := target.IsUpgradeTo(v.cliVersion); err != nil { - v.log.Debugf("Skipping incompatible minor version %q: %s", version, err) + v.log.Debug(fmt.Sprintf("Skipping incompatible minor version %q: %s", version, err)) continue } list := versionsapi.List{ @@ -692,7 +691,7 @@ func (v *versionCollector) filterCompatibleCLIVersions(ctx context.Context, cliP var compatibleVersions []consemver.Semver for _, version := range cliPatchVersions { if err := version.IsUpgradeTo(v.cliVersion); err != nil { - v.log.Debugf("Skipping incompatible patch version %q: %s", version, err) + v.log.Debug(fmt.Sprintf("Skipping incompatible patch version %q: %s", version, err)) continue } req := versionsapi.CLIInfo{ diff --git a/cli/internal/cmd/verify.go b/cli/internal/cmd/verify.go index cd734e5632..049f022931 100644 --- a/cli/internal/cmd/verify.go +++ b/cli/internal/cmd/verify.go @@ -100,7 +100,6 @@ func runVerify(cmd *cobra.Command, _ []string) error { if err != nil { return fmt.Errorf("creating logger: %w", err) } - defer log.Sync() fileHandler := file.NewHandler(afero.NewOsFs()) verifyClient := &constellationVerifier{ @@ -129,7 +128,7 @@ func runVerify(cmd *cobra.Command, _ []string) error { if err := v.flags.parse(cmd.Flags()); err != nil { return err } - v.log.Debugf("Using flags: %+v", v.flags) + v.log.Debug(fmt.Sprintf("Using flags: %+v", v.flags)) fetcher := attestationconfigapi.NewFetcher() return v.verify(cmd, verifyClient, formatterFactory, fetcher) } @@ -137,7 +136,7 @@ func runVerify(cmd *cobra.Command, _ []string) error { type formatterFactory func(output string, attestation variant.Variant, log debugLog) (attestationDocFormatter, error) func (c *verifyCmd) verify(cmd *cobra.Command, verifyClient verifyClient, factory formatterFactory, configFetcher attestationconfigapi.Fetcher) error { - c.log.Debugf("Loading configuration file from %q", c.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename)) + c.log.Debug(fmt.Sprintf("Loading configuration file from %q", c.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename))) conf, err := config.New(c.fileHandler, constants.ConfigFilename, configFetcher, c.flags.force) var configValidationErr *config.ValidationError if errors.As(err, &configValidationErr) { @@ -170,13 +169,13 @@ func (c *verifyCmd) verify(cmd *cobra.Command, verifyClient verifyClient, factor } conf.UpdateMAAURL(maaURL) - c.log.Debugf("Updating expected PCRs") + c.log.Debug("Updating expected PCRs") attConfig := conf.GetAttestationConfig() if err := updateInitMeasurements(attConfig, ownerID, clusterID); err != nil { return fmt.Errorf("updating expected PCRs: %w", err) } - c.log.Debugf("Creating aTLS Validator for %s", conf.GetAttestationConfig().GetVariant()) + c.log.Debug(fmt.Sprintf("Creating aTLS Validator for %s", conf.GetAttestationConfig().GetVariant())) validator, err := 
choose.Validator(attConfig, warnLogger{cmd: cmd, log: c.log}) if err != nil { return fmt.Errorf("creating aTLS validator: %w", err) @@ -186,7 +185,7 @@ func (c *verifyCmd) verify(cmd *cobra.Command, verifyClient verifyClient, factor if err != nil { return fmt.Errorf("generating random nonce: %w", err) } - c.log.Debugf("Generated random nonce: %x", nonce) + c.log.Debug(fmt.Sprintf("Generated random nonce: %x", nonce)) rawAttestationDoc, err := verifyClient.Verify( cmd.Context(), @@ -385,7 +384,7 @@ type constellationVerifier struct { func (v *constellationVerifier) Verify( ctx context.Context, endpoint string, req *verifyproto.GetAttestationRequest, validator atls.Validator, ) (string, error) { - v.log.Debugf("Dialing endpoint: %q", endpoint) + v.log.Debug(fmt.Sprintf("Dialing endpoint: %q", endpoint)) conn, err := v.dialer.DialInsecure(ctx, endpoint) if err != nil { return "", fmt.Errorf("dialing init server: %w", err) @@ -394,13 +393,13 @@ func (v *constellationVerifier) Verify( client := verifyproto.NewAPIClient(conn) - v.log.Debugf("Sending attestation request") + v.log.Debug("Sending attestation request") resp, err := client.GetAttestation(ctx, req) if err != nil { return "", fmt.Errorf("getting attestation: %w", err) } - v.log.Debugf("Verifying attestation") + v.log.Debug("Verifying attestation") signedData, err := validator.Validate(ctx, resp.Attestation, req.Nonce) if err != nil { return "", fmt.Errorf("validating attestation: %w", err) diff --git a/debugd/cmd/debugd/BUILD.bazel b/debugd/cmd/debugd/BUILD.bazel index 5f4e716e44..e517ef6a97 100644 --- a/debugd/cmd/debugd/BUILD.bazel +++ b/debugd/cmd/debugd/BUILD.bazel @@ -25,7 +25,6 @@ go_library( "//internal/cloud/qemu", "//internal/logger", "@com_github_spf13_afero//:afero", - "@org_uber_go_zap//:zap", ], ) diff --git a/debugd/cmd/debugd/debugd.go b/debugd/cmd/debugd/debugd.go index 92f7f16146..4140687f72 100644 --- a/debugd/cmd/debugd/debugd.go +++ b/debugd/cmd/debugd/debugd.go @@ -10,12 +10,12 @@ import ( "context" "flag" "fmt" + "log/slog" "net" "os" "sync" "github.com/spf13/afero" - "go.uber.org/zap" "github.com/edgelesssys/constellation/v2/debugd/internal/debugd/deploy" "github.com/edgelesssys/constellation/v2/debugd/internal/debugd/info" @@ -46,11 +46,11 @@ func main() { verbosity := flag.Int("v", 0, logger.CmdLineVerbosityDescription) flag.Parse() - log := logger.New(logger.JSONLog, logger.VerbosityFromInt(*verbosity)) + log := logger.NewJSONLogger(logger.VerbosityFromInt(*verbosity)) fs := afero.NewOsFs() streamer := streamer.New(fs) - filetransferer := filetransfer.New(log.Named("filetransfer"), streamer, filetransfer.DontShowProgress) - serviceManager := deploy.NewServiceManager(log.Named("serviceManager")) + filetransferer := filetransfer.New(log.WithGroup("filetransfer"), streamer, filetransfer.DontShowProgress) + serviceManager := deploy.NewServiceManager(log.WithGroup("serviceManager")) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -64,21 +64,24 @@ func main() { case platform.AWS: meta, err := awscloud.New(ctx) if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to initialize AWS metadata") + log.With(slog.Any("error", err)).Error("Failed to initialize AWS metadata") + os.Exit(1) } fetcher = cloudprovider.New(meta) case platform.Azure: meta, err := azurecloud.New(ctx) if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to initialize Azure metadata") + log.With(slog.Any("error", err)).Error("Failed to initialize Azure metadata") + os.Exit(1) } fetcher = cloudprovider.New(meta) 
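
slog has no Fatal level, so the metadata-initialization failures in debugd's main trade zap's Fatalf for an explicit Error log followed by os.Exit(1), as the surrounding cases show. A minimal sketch of the replacement; note that os.Exit skips deferred cleanup such as the cancel() above, which matches zap's Fatal behavior:

    package main

    import (
        "errors"
        "log/slog"
        "os"
    )

    // initMetadata is a dummy standing in for the cloud-specific
    // constructors (awscloud.New, azurecloud.New, ...); it always fails.
    func initMetadata() error { return errors.New("metadata endpoint unreachable") }

    func main() {
        log := slog.Default()
        if err := initMetadata(); err != nil {
            // Before: log.With(zap.Error(err)).Fatalf("Failed to initialize metadata")
            log.With(slog.Any("error", err)).Error("Failed to initialize metadata")
            os.Exit(1)
        }
    }
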
case platform.GCP: meta, err := gcpcloud.New(ctx) if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to initialize GCP metadata") + log.With(slog.Any("error", err)).Error("Failed to initialize GCP metadata") + os.Exit(1) } defer meta.Close() fetcher = cloudprovider.New(meta) @@ -86,26 +89,27 @@ func main() { case platform.OpenStack: meta, err := openstackcloud.New(ctx) if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to initialize OpenStack metadata") + log.With(slog.Any("error", err)).Error("Failed to initialize OpenStack metadata") + os.Exit(1) } fetcher = cloudprovider.New(meta) case platform.QEMU: fetcher = cloudprovider.New(qemucloud.New()) default: - log.Errorf("Unknown / unimplemented cloud provider CONSTEL_CSP=%v. Using fallback", csp) + log.Error(fmt.Sprintf("Unknown / unimplemented cloud provider CONSTEL_CSP=%v. Using fallback", csp)) fetcher = fallback.NewFallbackFetcher() } infoMap := info.NewMap() infoMap.RegisterOnReceiveTrigger( - logcollector.NewStartTrigger(ctx, wg, platform.FromString(csp), fetcher, log.Named("logcollector")), + logcollector.NewStartTrigger(ctx, wg, platform.FromString(csp), fetcher, log.WithGroup("logcollector")), ) - download := deploy.New(log.Named("download"), &net.Dialer{}, serviceManager, filetransferer, infoMap) + download := deploy.New(log.WithGroup("download"), &net.Dialer{}, serviceManager, filetransferer, infoMap) - sched := metadata.NewScheduler(log.Named("scheduler"), fetcher, download) - serv := server.New(log.Named("server"), serviceManager, filetransferer, infoMap) + sched := metadata.NewScheduler(log.WithGroup("scheduler"), fetcher, download) + serv := server.New(log.WithGroup("server"), serviceManager, filetransferer, infoMap) writeDebugBanner(log) @@ -114,14 +118,14 @@ func main() { wg.Wait() } -func writeDebugBanner(log *logger.Logger) { +func writeDebugBanner(log *slog.Logger) { tty, err := os.OpenFile("/dev/ttyS0", os.O_WRONLY, os.ModeAppend) if err != nil { - log.With(zap.Error(err)).Errorf("Unable to open /dev/ttyS0 for printing banner") + log.With(slog.Any("error", err)).Error("Unable to open /dev/ttyS0 for printing banner") return } defer tty.Close() if _, err := fmt.Fprint(tty, debugBanner); err != nil { - log.With(zap.Error(err)).Errorf("Unable to print to /dev/ttyS0") + log.With(slog.Any("error", err)).Error("Unable to print to /dev/ttyS0") } } diff --git a/debugd/internal/cdbg/cmd/deploy.go b/debugd/internal/cdbg/cmd/deploy.go index 9705df4a1a..95c3d147e0 100644 --- a/debugd/internal/cdbg/cmd/deploy.go +++ b/debugd/internal/cdbg/cmd/deploy.go @@ -10,6 +10,7 @@ import ( "context" "errors" "fmt" + "log/slog" "net" "path/filepath" "strconv" @@ -60,7 +61,7 @@ func runDeploy(cmd *cobra.Command, _ []string) error { if err != nil { return err } - log := logger.New(logger.PlainLog, logger.VerbosityFromInt(verbosity)) + log := logger.NewTextLogger(logger.VerbosityFromInt(verbosity)) force, err := cmd.Flags().GetBool("force") if err != nil { return fmt.Errorf("getting force flag: %w", err) @@ -83,7 +84,7 @@ func runDeploy(cmd *cobra.Command, _ []string) error { func deploy(cmd *cobra.Command, fileHandler file.Handler, constellationConfig *config.Config, transfer fileTransferer, - log *logger.Logger, + log *slog.Logger, ) error { binDir, err := cmd.Flags().GetString("bindir") if err != nil { @@ -99,13 +100,13 @@ func deploy(cmd *cobra.Command, fileHandler file.Handler, constellationConfig *c } if constellationConfig.IsReleaseImage() { - log.Infof("WARNING: Constellation image does not look like a debug image. 
Are you using a debug image?") + log.Info("WARNING: Constellation image does not look like a debug image. Are you using a debug image?") } if !constellationConfig.IsDebugCluster() { - log.Infof("WARNING: The Constellation config has debugCluster set to false.") - log.Infof("cdbg will likely not work unless you manually adjust the firewall / load balancing rules.") - log.Infof("If you create the cluster with a debug image, you should also set debugCluster to true.") + log.Info("WARNING: The Constellation config has debugCluster set to false.") + log.Info("cdbg will likely not work unless you manually adjust the firewall / load balancing rules.") + log.Info("If you create the cluster with a debug image, you should also set debugCluster to true.") } ips, err := cmd.Flags().GetStringSlice("ips") @@ -171,14 +172,14 @@ type deployOnEndpointInput struct { files []filetransfer.FileStat infos map[string]string transfer fileTransferer - log *logger.Logger + log *slog.Logger } // deployOnEndpoint deploys a custom built bootstrapper binary to a debugd endpoint. func deployOnEndpoint(ctx context.Context, in deployOnEndpointInput) error { ctx, cancel := context.WithTimeout(ctx, deployEndpointTimeout) defer cancel() - in.log.Infof("Deploying on %v", in.debugdEndpoint) + in.log.Info(fmt.Sprintf("Deploying on %v", in.debugdEndpoint)) client, closeAndWaitFn, err := newDebugdClient(ctx, in.debugdEndpoint, in.log) if err != nil { @@ -201,13 +202,13 @@ func deployOnEndpoint(ctx context.Context, in deployOnEndpointInput) error { type closeAndWait func() // newDebugdClient creates a new gRPC client for the debugd service and logs the connection state changes. -func newDebugdClient(ctx context.Context, ip string, log *logger.Logger) (pb.DebugdClient, closeAndWait, error) { +func newDebugdClient(ctx context.Context, ip string, log *slog.Logger) (pb.DebugdClient, closeAndWait, error) { conn, err := grpc.DialContext( ctx, net.JoinHostPort(ip, strconv.Itoa(constants.DebugdPort)), grpc.WithTransportCredentials(insecure.NewCredentials()), - log.GetClientUnaryInterceptor(), - log.GetClientStreamInterceptor(), + logger.GetClientUnaryInterceptor(log), + logger.GetClientStreamInterceptor(log), ) if err != nil { return nil, nil, fmt.Errorf("connecting to other instance via gRPC: %w", err) @@ -221,8 +222,8 @@ func newDebugdClient(ctx context.Context, ip string, log *logger.Logger) (pb.Deb return pb.NewDebugdClient(conn), closeAndWait, nil } -func setInfo(ctx context.Context, log *logger.Logger, client pb.DebugdClient, infos map[string]string) error { - log.Infof("Setting info with length %d", len(infos)) +func setInfo(ctx context.Context, log *slog.Logger, client pb.DebugdClient, infos map[string]string) error { + log.Info(fmt.Sprintf("Setting info with length %d", len(infos))) var infosPb []*pb.Info for key, value := range infos { @@ -238,17 +239,17 @@ func setInfo(ctx context.Context, log *logger.Logger, client pb.DebugdClient, in switch status.Status { case pb.SetInfoStatus_SET_INFO_SUCCESS: - log.Infof("Info set") + log.Info("Info set") case pb.SetInfoStatus_SET_INFO_ALREADY_SET: - log.Infof("Info already set") + log.Info("Info already set") default: - log.Warnf("Unknown status %v", status.Status) + log.Warn(fmt.Sprintf("Unknown status %v", status.Status)) } return nil } func uploadFiles(ctx context.Context, client pb.DebugdClient, in deployOnEndpointInput) error { - in.log.Infof("Uploading files") + in.log.Info("Uploading files") stream, err := client.UploadFiles(ctx, grpc.WaitForReady(true)) if err != nil { @@ -266,9 
+267,9 @@ func uploadFiles(ctx context.Context, client pb.DebugdClient, in deployOnEndpoin } switch uploadResponse.Status { case pb.UploadFilesStatus_UPLOAD_FILES_SUCCESS: - in.log.Infof("Upload successful") + in.log.Info("Upload successful") case pb.UploadFilesStatus_UPLOAD_FILES_ALREADY_FINISHED: - in.log.Infof("Files already uploaded") + in.log.Info("Files already uploaded") case pb.UploadFilesStatus_UPLOAD_FILES_UPLOAD_FAILED: return fmt.Errorf("uploading files to %v failed: %v", in.debugdEndpoint, uploadResponse) case pb.UploadFilesStatus_UPLOAD_FILES_ALREADY_STARTED: diff --git a/debugd/internal/debugd/deploy/BUILD.bazel b/debugd/internal/debugd/deploy/BUILD.bazel index 4e0cccb4fe..16ba9cdf3a 100644 --- a/debugd/internal/debugd/deploy/BUILD.bazel +++ b/debugd/internal/debugd/deploy/BUILD.bazel @@ -16,12 +16,10 @@ go_library( "//debugd/internal/filetransfer", "//debugd/service", "//internal/constants", - "//internal/logger", "@com_github_coreos_go_systemd_v22//dbus", "@com_github_spf13_afero//:afero", "@org_golang_google_grpc//:go_default_library", "@org_golang_google_grpc//credentials/insecure", - "@org_uber_go_zap//:zap", ], ) diff --git a/debugd/internal/debugd/deploy/download.go b/debugd/internal/debugd/deploy/download.go index b4000ff2cd..affe685d3e 100644 --- a/debugd/internal/debugd/deploy/download.go +++ b/debugd/internal/debugd/deploy/download.go @@ -11,21 +11,20 @@ import ( "errors" "fmt" "io" + "log/slog" "net" "strconv" "github.com/edgelesssys/constellation/v2/debugd/internal/filetransfer" pb "github.com/edgelesssys/constellation/v2/debugd/service" "github.com/edgelesssys/constellation/v2/internal/constants" - "github.com/edgelesssys/constellation/v2/internal/logger" - "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" ) // Download downloads a bootstrapper from a given debugd instance. type Download struct { - log *logger.Logger + log *slog.Logger dialer NetDialer transfer fileTransferer serviceManager serviceManager @@ -33,7 +32,7 @@ type Download struct { } // New creates a new Download. -func New(log *logger.Logger, dialer NetDialer, serviceManager serviceManager, +func New(log *slog.Logger, dialer NetDialer, serviceManager serviceManager, transfer fileTransferer, info infoSetter, ) *Download { return &Download{ @@ -51,7 +50,7 @@ func (d *Download) DownloadInfo(ctx context.Context, ip string) error { return nil } - log := d.log.With(zap.String("ip", ip)) + log := d.log.With(slog.String("ip", ip)) serverAddr := net.JoinHostPort(ip, strconv.Itoa(constants.DebugdPort)) client, closer, err := d.newClient(ctx, serverAddr, log) @@ -60,19 +59,19 @@ func (d *Download) DownloadInfo(ctx context.Context, ip string) error { } defer closer.Close() - log.Infof("Trying to download info") + log.Info("Trying to download info") resp, err := client.GetInfo(ctx, &pb.GetInfoRequest{}) if err != nil { return fmt.Errorf("getting info from other instance: %w", err) } - log.Infof("Successfully downloaded info") + log.Info("Successfully downloaded info") return d.info.SetProto(resp.Info) } // DownloadDeployment will open a new grpc connection to another instance, attempting to download files from that instance. 
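
Named child loggers translate as well: zap's log.Named("download") becomes log.WithGroup("download"), and typed fields such as zap.String("ip", ip) become slog attributes, as in the Download methods here. A standard-library sketch (the IP is a placeholder):

    package main

    import (
        "log/slog"
        "os"
    )

    func main() {
        root := slog.New(slog.NewJSONHandler(os.Stderr, nil))

        // Before: log := root.Named("download").With(zap.String("ip", ip))
        log := root.WithGroup("download").With(slog.String("ip", "192.0.2.1"))

        // Attributes added after WithGroup are nested under "download" in
        // the JSON output: ..."msg":"Trying to download info","download":{"ip":"192.0.2.1"}
        log.Info("Trying to download info")
    }

One semantic difference worth knowing: zap's Named only prefixed the logger name, whereas a slog group nests all subsequently added attributes under its key.
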
func (d *Download) DownloadDeployment(ctx context.Context, ip string) error { - log := d.log.With(zap.String("ip", ip)) + log := d.log.With(slog.String("ip", ip)) serverAddr := net.JoinHostPort(ip, strconv.Itoa(constants.DebugdPort)) client, closer, err := d.newClient(ctx, serverAddr, log) @@ -81,7 +80,7 @@ func (d *Download) DownloadDeployment(ctx context.Context, ip string) error { } defer closer.Close() - log.Infof("Trying to download files") + log.Info("Trying to download files") stream, err := client.DownloadFiles(ctx, &pb.DownloadFilesRequest{}) if err != nil { return fmt.Errorf("starting file download from other instance: %w", err) @@ -90,15 +89,15 @@ func (d *Download) DownloadDeployment(ctx context.Context, ip string) error { err = d.transfer.RecvFiles(stream) switch { case err == nil: - d.log.Infof("Downloading files succeeded") + d.log.Info("Downloading files succeeded") case errors.Is(err, filetransfer.ErrReceiveRunning): - d.log.Warnf("Download already in progress") + d.log.Warn("Download already in progress") return err case errors.Is(err, filetransfer.ErrReceiveFinished): - d.log.Warnf("Download already finished") + d.log.Warn("Download already finished") return nil default: - d.log.With(zap.Error(err)).Errorf("Downloading files failed") + d.log.With(slog.Any("error", err)).Error("Downloading files failed") return err } @@ -111,15 +110,15 @@ func (d *Download) DownloadDeployment(ctx context.Context, ip string) error { ctx, file.OverrideServiceUnit, file.TargetPath, ); err != nil { // continue on error to allow other units to be overridden - d.log.With(zap.Error(err)).Errorf("Failed to override service unit %s", file.OverrideServiceUnit) + d.log.With(slog.Any("error", err)).Error(fmt.Sprintf("Failed to override service unit %s", file.OverrideServiceUnit)) } } return nil } -func (d *Download) newClient(ctx context.Context, serverAddr string, log *logger.Logger) (pb.DebugdClient, io.Closer, error) { - log.Infof("Connecting to server") +func (d *Download) newClient(ctx context.Context, serverAddr string, log *slog.Logger) (pb.DebugdClient, io.Closer, error) { + log.Info("Connecting to server") conn, err := d.dial(ctx, serverAddr) if err != nil { return nil, nil, fmt.Errorf("connecting to other instance via gRPC: %w", err) diff --git a/debugd/internal/debugd/deploy/service.go b/debugd/internal/debugd/deploy/service.go index f82e09c252..806e8abbc2 100644 --- a/debugd/internal/debugd/deploy/service.go +++ b/debugd/internal/debugd/deploy/service.go @@ -9,15 +9,14 @@ package deploy import ( "context" "fmt" + "log/slog" "os" "path/filepath" "regexp" "strings" "sync" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/spf13/afero" - "go.uber.org/zap" ) const ( @@ -60,14 +59,14 @@ type SystemdUnit struct { // ServiceManager receives ServiceManagerRequests and units via channels and performs the requests / creates the unit files. type ServiceManager struct { - log *logger.Logger + log *slog.Logger dbus dbusClient fs afero.Fs systemdUnitFilewriteLock sync.Mutex } // NewServiceManager creates a new ServiceManager. -func NewServiceManager(log *logger.Logger) *ServiceManager { +func NewServiceManager(log *slog.Logger) *ServiceManager { fs := afero.NewOsFs() return &ServiceManager{ log: log, @@ -102,7 +101,7 @@ type dbusConn interface { // SystemdAction will perform a systemd action on a service unit (start, stop, restart, reload). 
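
With the custom logger type gone, helpers that used to be methods, like the gRPC logging interceptors wired up in newDebugdClient above, become free functions in internal/logger that accept a *slog.Logger. The actual implementation is not part of these hunks; the following is only a rough sketch of the shape such a helper could take:

    package main

    import (
        "context"
        "log/slog"

        "google.golang.org/grpc"
    )

    // getClientUnaryInterceptor is a hypothetical stand-in for
    // logger.GetClientUnaryInterceptor: it logs every unary call on the
    // given *slog.Logger before delegating to the real invoker.
    func getClientUnaryInterceptor(log *slog.Logger) grpc.DialOption {
        return grpc.WithChainUnaryInterceptor(func(
            ctx context.Context, method string, req, reply any,
            cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption,
        ) error {
            log.Debug("gRPC call", "method", method)
            err := invoker(ctx, method, req, reply, cc, opts...)
            if err != nil {
                log.Debug("gRPC call failed", "method", method, "error", err)
            }
            return err
        })
    }

    func main() {
        // The option is handed to grpc.DialContext at the call site,
        // as in newDebugdClient above.
        _ = getClientUnaryInterceptor(slog.Default())
    }
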
func (s *ServiceManager) SystemdAction(ctx context.Context, request ServiceManagerRequest) error { - log := s.log.With(zap.String("unit", request.Unit), zap.String("action", request.Action.String())) + log := s.log.With(slog.String("unit", request.Unit), slog.String("action", request.Action.String())) conn, err := s.dbus.NewSystemConnectionContext(ctx) if err != nil { return fmt.Errorf("establishing systemd connection: %w", err) @@ -127,7 +126,7 @@ func (s *ServiceManager) SystemdAction(ctx context.Context, request ServiceManag } if request.Action == Reload { - log.Infof("daemon-reload succeeded") + log.Info("daemon-reload succeeded") return nil } // Wait for the action to finish and then check if it was @@ -136,7 +135,7 @@ func (s *ServiceManager) SystemdAction(ctx context.Context, request ServiceManag switch result { case "done": - log.Infof("%s on systemd unit %s succeeded", request.Action, request.Unit) + log.Info(fmt.Sprintf("%s on systemd unit %s succeeded", request.Action, request.Unit)) return nil default: @@ -146,8 +145,8 @@ func (s *ServiceManager) SystemdAction(ctx context.Context, request ServiceManag // WriteSystemdUnitFile will write a systemd unit to disk. func (s *ServiceManager) WriteSystemdUnitFile(ctx context.Context, unit SystemdUnit) error { - log := s.log.With(zap.String("unitFile", fmt.Sprintf("%s/%s", systemdUnitFolder, unit.Name))) - log.Infof("Writing systemd unit file") + log := s.log.With(slog.String("unitFile", fmt.Sprintf("%s/%s", systemdUnitFolder, unit.Name))) + log.Info("Writing systemd unit file") s.systemdUnitFilewriteLock.Lock() defer s.systemdUnitFilewriteLock.Unlock() if err := afero.WriteFile(s.fs, fmt.Sprintf("%s/%s", systemdUnitFolder, unit.Name), []byte(unit.Contents), 0o644); err != nil { @@ -158,14 +157,14 @@ func (s *ServiceManager) WriteSystemdUnitFile(ctx context.Context, unit SystemdU return fmt.Errorf("performing systemd daemon-reload: %w", err) } - log.Infof("Wrote systemd unit file and performed daemon-reload") + log.Info("Wrote systemd unit file and performed daemon-reload") return nil } // OverrideServiceUnitExecStart will override the ExecStart of a systemd unit. 
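
For context on the result handling in SystemdAction above: go-systemd's *Context calls report job completion through a channel, and only the string "done" signals success. A condensed sketch of that flow, assuming github.com/coreos/go-systemd/v22/dbus as used by this package (the unit name is a placeholder):

    package main

    import (
        "context"
        "fmt"
        "log/slog"

        "github.com/coreos/go-systemd/v22/dbus"
    )

    // restartUnit condenses the SystemdAction flow above to one action.
    func restartUnit(ctx context.Context, log *slog.Logger, unit string) error {
        conn, err := dbus.NewSystemConnectionContext(ctx)
        if err != nil {
            return fmt.Errorf("establishing systemd connection: %w", err)
        }
        defer conn.Close()

        resultChan := make(chan string, 1)
        if _, err := conn.RestartUnitContext(ctx, unit, "replace", resultChan); err != nil {
            return fmt.Errorf("restarting %s: %w", unit, err)
        }

        // systemd reports the job result as a string: "done" means success,
        // everything else ("failed", "timeout", "canceled", ...) is an error.
        if result := <-resultChan; result != "done" {
            return fmt.Errorf("restart of %s finished with result %q", unit, result)
        }
        log.Info(fmt.Sprintf("restart on systemd unit %s succeeded", unit))
        return nil
    }

    func main() {
        log := slog.Default()
        if err := restartUnit(context.Background(), log, "nginx.service"); err != nil {
            log.Error("systemd action failed", "error", err)
        }
    }
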
func (s *ServiceManager) OverrideServiceUnitExecStart(ctx context.Context, unitName, execStart string) error { - log := s.log.With(zap.String("unitFile", fmt.Sprintf("%s/%s", systemdUnitFolder, unitName))) - log.Infof("Overriding systemd unit file execStart") + log := s.log.With(slog.String("unitFile", fmt.Sprintf("%s/%s", systemdUnitFolder, unitName))) + log.Info("Overriding systemd unit file execStart") if !systemdUnitNameRegexp.MatchString(unitName) { return fmt.Errorf("unit name %q is invalid", unitName) } @@ -187,13 +186,13 @@ func (s *ServiceManager) OverrideServiceUnitExecStart(ctx context.Context, unitN // do not return early here // the "daemon-reload" command may return an unrelated error // and there is no way to know if the override was successful - log.Warnf("Failed to perform systemd daemon-reload: %v", err) + log.Warn(fmt.Sprintf("Failed to perform systemd daemon-reload: %v", err)) } if err := s.SystemdAction(ctx, ServiceManagerRequest{Unit: unitName + ".service", Action: Restart}); err != nil { - log.Warnf("Failed to perform unit restart: %v", err) + log.Warn(fmt.Sprintf("Failed to perform unit restart: %v", err)) return fmt.Errorf("performing systemd unit restart: %w", err) } - log.Infof("Overrode systemd unit file execStart, performed daemon-reload and restarted unit %v", unitName) + log.Info(fmt.Sprintf("Overrode systemd unit file execStart, performed daemon-reload and restarted unit %v", unitName)) return nil } diff --git a/debugd/internal/debugd/logcollector/BUILD.bazel b/debugd/internal/debugd/logcollector/BUILD.bazel index 6ecc05a718..4e0f0b089c 100644 --- a/debugd/internal/debugd/logcollector/BUILD.bazel +++ b/debugd/internal/debugd/logcollector/BUILD.bazel @@ -14,7 +14,6 @@ go_library( "//debugd/internal/debugd/info", "//internal/cloud/cloudprovider", "//internal/cloud/metadata", - "//internal/logger", "//internal/versions", "@com_github_aws_aws_sdk_go_v2_config//:config", "@com_github_aws_aws_sdk_go_v2_service_secretsmanager//:secretsmanager", diff --git a/debugd/internal/debugd/logcollector/logcollector.go b/debugd/internal/debugd/logcollector/logcollector.go index c40859d0be..9723d102fd 100644 --- a/debugd/internal/debugd/logcollector/logcollector.go +++ b/debugd/internal/debugd/logcollector/logcollector.go @@ -12,6 +12,7 @@ import ( "context" "fmt" "io" + "log/slog" "os" "os/exec" "path/filepath" @@ -22,7 +23,6 @@ import ( "github.com/edgelesssys/constellation/v2/debugd/internal/debugd/info" "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider" "github.com/edgelesssys/constellation/v2/internal/cloud/metadata" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/internal/versions" ) @@ -36,60 +36,60 @@ const ( // // This requires podman to be installed. 
func NewStartTrigger(ctx context.Context, wg *sync.WaitGroup, provider cloudprovider.Provider, - metadata providerMetadata, logger *logger.Logger, + metadata providerMetadata, logger *slog.Logger, ) func(*info.Map) { return func(infoMap *info.Map) { wg.Add(1) go func() { defer wg.Done() - logger.Infof("Start trigger running") + logger.Info("Start trigger running") if err := ctx.Err(); err != nil { - logger.With("err", err).Errorf("Start trigger canceled") + logger.With("err", err).Error("Start trigger canceled") return } - logger.Infof("Get flags from infos") + logger.Info("Get flags from infos") _, ok, err := infoMap.Get("logcollect") if err != nil { - logger.Errorf("Getting infos: %v", err) + logger.Error(fmt.Sprintf("Getting infos: %v", err)) return } if !ok { - logger.Infof("Flag 'logcollect' not set") + logger.Info("Flag 'logcollect' not set") return } cerdsGetter, err := newCloudCredentialGetter(ctx, provider, infoMap) if err != nil { - logger.Errorf("Creating cloud credential getter: %v", err) + logger.Error(fmt.Sprintf("Creating cloud credential getter: %v", err)) return } - logger.Infof("Getting credentials") + logger.Info("Getting credentials") creds, err := cerdsGetter.GetOpensearchCredentials(ctx) if err != nil { - logger.Errorf("Getting opensearch credentials: %v", err) + logger.Error(fmt.Sprintf("Getting opensearch credentials: %v", err)) return } - logger.Infof("Getting logstash pipeline template from image %s", versions.LogstashImage) + logger.Info(fmt.Sprintf("Getting logstash pipeline template from image %s", versions.LogstashImage)) tmpl, err := getTemplate(ctx, logger, versions.LogstashImage, "/run/logstash/templates/pipeline.conf", "/run/logstash") if err != nil { - logger.Errorf("Getting logstash pipeline template: %v", err) + logger.Error(fmt.Sprintf("Getting logstash pipeline template: %v", err)) return } infoMapM, err := infoMap.GetCopy() if err != nil { - logger.Errorf("Getting copy of map from info: %v", err) + logger.Error(fmt.Sprintf("Getting copy of map from info: %v", err)) return } infoMapM = filterInfoMap(infoMapM) setCloudMetadata(ctx, infoMapM, provider, metadata) - logger.Infof("Writing logstash pipeline") + logger.Info("Writing logstash pipeline") pipelineConf := logstashConfInput{ Port: 5044, Host: openSearchHost, @@ -97,14 +97,14 @@ func NewStartTrigger(ctx context.Context, wg *sync.WaitGroup, provider cloudprov Credentials: creds, } if err := writeTemplate("/run/logstash/pipeline/pipeline.conf", tmpl, pipelineConf); err != nil { - logger.Errorf("Writing logstash config: %v", err) + logger.Error(fmt.Sprintf("Writing logstash config: %v", err)) return } - logger.Infof("Getting filebeat config template from image %s", versions.FilebeatImage) + logger.Info(fmt.Sprintf("Getting filebeat config template from image %s", versions.FilebeatImage)) tmpl, err = getTemplate(ctx, logger, versions.FilebeatImage, "/run/filebeat/templates/filebeat.yml", "/run/filebeat") if err != nil { - logger.Errorf("Getting filebeat config template: %v", err) + logger.Error(fmt.Sprintf("Getting filebeat config template: %v", err)) return } filebeatConf := filebeatConfInput{ @@ -112,26 +112,26 @@ func NewStartTrigger(ctx context.Context, wg *sync.WaitGroup, provider cloudprov AddCloudMetadata: true, } if err := writeTemplate("/run/filebeat/filebeat.yml", tmpl, filebeatConf); err != nil { - logger.Errorf("Writing filebeat pipeline: %v", err) + logger.Error(fmt.Sprintf("Writing filebeat pipeline: %v", err)) return } - logger.Infof("Starting log collection pod") + 
logger.Info("Starting log collection pod") if err := startPod(ctx, logger); err != nil { - logger.Errorf("Starting log collection: %v", err) + logger.Error(fmt.Sprintf("Starting log collection: %v", err)) } }() } } -func getTemplate(ctx context.Context, logger *logger.Logger, image, templateDir, destDir string) (*template.Template, error) { +func getTemplate(ctx context.Context, logger *slog.Logger, image, templateDir, destDir string) (*template.Template, error) { createContainerArgs := []string{ "create", "--name=template", image, } createContainerCmd := exec.CommandContext(ctx, "podman", createContainerArgs...) - logger.Infof("Creating template container") + logger.Info("Creating template container") if out, err := createContainerCmd.CombinedOutput(); err != nil { return nil, fmt.Errorf("creating template container: %w\n%s", err, out) } @@ -146,7 +146,7 @@ func getTemplate(ctx context.Context, logger *logger.Logger, image, templateDir, destDir, } copyFromCmd := exec.CommandContext(ctx, "podman", copyFromArgs...) - logger.Infof("Copying templates") + logger.Info("Copying templates") if out, err := copyFromCmd.CombinedOutput(); err != nil { return nil, fmt.Errorf("copying templates: %w\n%s", err, out) } @@ -156,7 +156,7 @@ func getTemplate(ctx context.Context, logger *logger.Logger, image, templateDir, "template", } removeContainerCmd := exec.CommandContext(ctx, "podman", removeContainerArgs...) - logger.Infof("Removing template container") + logger.Info("Removing template container") if out, err := removeContainerCmd.CombinedOutput(); err != nil { return nil, fmt.Errorf("removing template container: %w\n%s", err, out) } @@ -169,7 +169,7 @@ func getTemplate(ctx context.Context, logger *logger.Logger, image, templateDir, return tmpl, nil } -func startPod(ctx context.Context, logger *logger.Logger) error { +func startPod(ctx context.Context, logger *slog.Logger) error { // create a shared pod for filebeat, metricbeat and logstash createPodArgs := []string{ "pod", @@ -177,13 +177,13 @@ func startPod(ctx context.Context, logger *logger.Logger) error { "logcollection", } createPodCmd := exec.CommandContext(ctx, "podman", createPodArgs...) - logger.Infof("Create pod command: %v", createPodCmd.String()) + logger.Info(fmt.Sprintf("Create pod command: %v", createPodCmd.String())) if out, err := createPodCmd.CombinedOutput(); err != nil { return fmt.Errorf("failed to create pod: %w; output: %s", err, out) } // start logstash container - logstashLog := newCmdLogger(logger.Named("logstash")) + logstashLog := newCmdLogger(logger.WithGroup("logstash")) runLogstashArgs := []string{ "run", "--rm", @@ -194,7 +194,7 @@ func startPod(ctx context.Context, logger *logger.Logger) error { versions.LogstashImage, } runLogstashCmd := exec.CommandContext(ctx, "podman", runLogstashArgs...) - logger.Infof("Run logstash command: %v", runLogstashCmd.String()) + logger.Info(fmt.Sprintf("Run logstash command: %v", runLogstashCmd.String())) runLogstashCmd.Stdout = logstashLog runLogstashCmd.Stderr = logstashLog if err := runLogstashCmd.Start(); err != nil { @@ -202,7 +202,7 @@ func startPod(ctx context.Context, logger *logger.Logger) error { } // start filebeat container - filebeatLog := newCmdLogger(logger.Named("filebeat")) + filebeatLog := newCmdLogger(logger.WithGroup("filebeat")) runFilebeatArgs := []string{ "run", "--rm", @@ -219,7 +219,7 @@ func startPod(ctx context.Context, logger *logger.Logger) error { versions.FilebeatImage, } runFilebeatCmd := exec.CommandContext(ctx, "podman", runFilebeatArgs...) 
- logger.Infof("Run filebeat command: %v", runFilebeatCmd.String()) + logger.Info(fmt.Sprintf("Run filebeat command: %v", runFilebeatCmd.String())) runFilebeatCmd.Stdout = filebeatLog runFilebeatCmd.Stderr = filebeatLog if err := runFilebeatCmd.Start(); err != nil { @@ -295,16 +295,16 @@ func setCloudMetadata(ctx context.Context, m map[string]string, provider cloudpr } } -func newCmdLogger(logger *logger.Logger) io.Writer { +func newCmdLogger(logger *slog.Logger) io.Writer { return &cmdLogger{logger: logger} } type cmdLogger struct { - logger *logger.Logger + logger *slog.Logger } func (c *cmdLogger) Write(p []byte) (n int, err error) { - c.logger.Infof("%s", p) + c.logger.Info(string(p)) return len(p), nil } diff --git a/debugd/internal/debugd/metadata/BUILD.bazel b/debugd/internal/debugd/metadata/BUILD.bazel index 8cc159b11e..7b57a5fb0a 100644 --- a/debugd/internal/debugd/metadata/BUILD.bazel +++ b/debugd/internal/debugd/metadata/BUILD.bazel @@ -9,11 +9,7 @@ go_library( ], importpath = "github.com/edgelesssys/constellation/v2/debugd/internal/debugd/metadata", visibility = ["//debugd:__subpackages__"], - deps = [ - "//debugd/internal/debugd", - "//internal/logger", - "@org_uber_go_zap//:zap", - ], + deps = ["//debugd/internal/debugd"], ) go_test( diff --git a/debugd/internal/debugd/metadata/scheduler.go b/debugd/internal/debugd/metadata/scheduler.go index eb04e5ade1..bf6705fecc 100644 --- a/debugd/internal/debugd/metadata/scheduler.go +++ b/debugd/internal/debugd/metadata/scheduler.go @@ -8,12 +8,11 @@ package metadata import ( "context" + "log/slog" "sync" "time" "github.com/edgelesssys/constellation/v2/debugd/internal/debugd" - "github.com/edgelesssys/constellation/v2/internal/logger" - "go.uber.org/zap" ) // Fetcher retrieves other debugd IPs from cloud provider metadata. @@ -24,7 +23,7 @@ type Fetcher interface { // Scheduler schedules fetching of metadata using timers. type Scheduler struct { - log *logger.Logger + log *slog.Logger fetcher Fetcher downloader downloader deploymentDone bool @@ -33,7 +32,7 @@ type Scheduler struct { } // NewScheduler returns a new scheduler. -func NewScheduler(log *logger.Logger, fetcher Fetcher, downloader downloader) *Scheduler { +func NewScheduler(log *slog.Logger, fetcher Fetcher, downloader downloader) *Scheduler { return &Scheduler{ log: log, fetcher: fetcher, @@ -60,22 +59,22 @@ func (s *Scheduler) Start(ctx context.Context, wg *sync.WaitGroup) { ips, err := s.fetcher.DiscoverDebugdIPs(ctx) if err != nil { - s.log.With(zap.Error(err)).Warnf("Discovering debugd IPs failed") + s.log.With(slog.Any("error", err)).Warn("Discovering debugd IPs failed") } lbip, err := s.fetcher.DiscoverLoadBalancerIP(ctx) if err != nil { - s.log.With(zap.Error(err)).Warnf("Discovering load balancer IP failed") + s.log.With(slog.Any("error", err)).Warn("Discovering load balancer IP failed") } else { ips = append(ips, lbip) } if len(ips) == 0 { - s.log.With(zap.Error(err)).Warnf("No debugd IPs discovered") + s.log.With(slog.Any("error", err)).Warn("No debugd IPs discovered") continue } - s.log.With(zap.Strings("ips", ips)).Infof("Discovered instances") + s.log.With(slog.Any("ips", ips)).Info("Discovered instances") s.download(ctx, ips) if s.deploymentDone && s.infoDone { return @@ -90,8 +89,8 @@ func (s *Scheduler) download(ctx context.Context, ips []string) { for _, ip := range ips { if !s.deploymentDone { if err := s.downloader.DownloadDeployment(ctx, ip); err != nil { - s.log.With(zap.Error(err), zap.String("peer", ip)). 
- Warnf("Downloading deployment from %s: %s", ip, err) + s.log.With(slog.Any("error", err), slog.String("peer", ip)). + Warn("Downloading deployment from %s: %s", ip, err) } else { s.deploymentDone = true } @@ -99,8 +98,8 @@ func (s *Scheduler) download(ctx context.Context, ips []string) { if !s.infoDone { if err := s.downloader.DownloadInfo(ctx, ip); err != nil { - s.log.With(zap.Error(err), zap.String("peer", ip)). - Warnf("Downloading info from %s: %s", ip, err) + s.log.With(slog.Any("error", err), slog.String("peer", ip)). + Warn("Downloading info from %s: %s", ip, err) } else { s.infoDone = true } diff --git a/debugd/internal/debugd/server/BUILD.bazel b/debugd/internal/debugd/server/BUILD.bazel index 19e646cf73..ee0de46c2c 100644 --- a/debugd/internal/debugd/server/BUILD.bazel +++ b/debugd/internal/debugd/server/BUILD.bazel @@ -15,7 +15,6 @@ go_library( "//internal/logger", "@org_golang_google_grpc//:go_default_library", "@org_golang_google_grpc//keepalive", - "@org_uber_go_zap//:zap", ], ) diff --git a/debugd/internal/debugd/server/server.go b/debugd/internal/debugd/server/server.go index 551230ae71..aeae7c4a18 100644 --- a/debugd/internal/debugd/server/server.go +++ b/debugd/internal/debugd/server/server.go @@ -10,7 +10,9 @@ package server import ( "context" "errors" + "log/slog" "net" + "os" "strconv" "sync" "time" @@ -21,13 +23,12 @@ import ( pb "github.com/edgelesssys/constellation/v2/debugd/service" "github.com/edgelesssys/constellation/v2/internal/constants" "github.com/edgelesssys/constellation/v2/internal/logger" - "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/keepalive" ) type debugdServer struct { - log *logger.Logger + log *slog.Logger serviceManager serviceManager transfer fileTransferer info *info.Map @@ -36,7 +37,7 @@ type debugdServer struct { } // New creates a new debugdServer according to the gRPC spec. -func New(log *logger.Logger, serviceManager serviceManager, transfer fileTransferer, infos *info.Map) pb.DebugdServer { +func New(log *slog.Logger, serviceManager serviceManager, transfer fileTransferer, infos *info.Map) pb.DebugdServer { return &debugdServer{ log: log, serviceManager: serviceManager, @@ -47,25 +48,25 @@ func New(log *logger.Logger, serviceManager serviceManager, transfer fileTransfe // SetInfo sets the info of the debugd instance. func (s *debugdServer) SetInfo(_ context.Context, req *pb.SetInfoRequest) (*pb.SetInfoResponse, error) { - s.log.Infof("Received SetInfo request") + s.log.Info("Received SetInfo request") if len(req.Info) == 0 { - s.log.Infof("Info is empty") + s.log.Info("Info is empty") } setProtoErr := s.info.SetProto(req.Info) if errors.Is(setProtoErr, info.ErrInfoAlreadySet) { - s.log.Warnf("Setting info failed (already set)") + s.log.Warn("Setting info failed (already set)") return &pb.SetInfoResponse{ Status: pb.SetInfoStatus_SET_INFO_ALREADY_SET, }, nil } if setProtoErr != nil { - s.log.With(zap.Error(setProtoErr)).Errorf("Setting info failed") + s.log.With(slog.Any("error", setProtoErr)).Error("Setting info failed") return nil, setProtoErr } - s.log.Infof("Info set") + s.log.Info("Info set") return &pb.SetInfoResponse{ Status: pb.SetInfoStatus_SET_INFO_SUCCESS, @@ -74,7 +75,7 @@ func (s *debugdServer) SetInfo(_ context.Context, req *pb.SetInfoRequest) (*pb.S // GetInfo returns the info of the debugd instance. 
func (s *debugdServer) GetInfo(_ context.Context, _ *pb.GetInfoRequest) (*pb.GetInfoResponse, error) { - s.log.Infof("Received GetInfo request") + s.log.Info("Received GetInfo request") info, err := s.info.GetProto() if err != nil { @@ -86,23 +87,23 @@ func (s *debugdServer) GetInfo(_ context.Context, _ *pb.GetInfoRequest) (*pb.Get // UploadFiles receives a stream of files (each consisting of a header and a stream of chunks) and writes them to the filesystem. func (s *debugdServer) UploadFiles(stream pb.Debugd_UploadFilesServer) error { - s.log.Infof("Received UploadFiles request") + s.log.Info("Received UploadFiles request") err := s.transfer.RecvFiles(stream) switch { case err == nil: - s.log.Infof("Uploading files succeeded") + s.log.Info("Uploading files succeeded") case errors.Is(err, filetransfer.ErrReceiveRunning): - s.log.Warnf("Upload already in progress") + s.log.Warn("Upload already in progress") return stream.SendAndClose(&pb.UploadFilesResponse{ Status: pb.UploadFilesStatus_UPLOAD_FILES_ALREADY_STARTED, }) case errors.Is(err, filetransfer.ErrReceiveFinished): - s.log.Warnf("Upload already finished") + s.log.Warn("Upload already finished") return stream.SendAndClose(&pb.UploadFilesResponse{ Status: pb.UploadFilesStatus_UPLOAD_FILES_ALREADY_FINISHED, }) default: - s.log.With(zap.Error(err)).Errorf("Uploading files failed") + s.log.With(slog.Any("error", err)).Error("Uploading files failed") return stream.SendAndClose(&pb.UploadFilesResponse{ Status: pb.UploadFilesStatus_UPLOAD_FILES_UPLOAD_FAILED, }) @@ -120,7 +121,7 @@ func (s *debugdServer) UploadFiles(stream pb.Debugd_UploadFilesServer) error { } if overrideUnitErr != nil { - s.log.With(zap.Error(overrideUnitErr)).Errorf("Overriding service units failed") + s.log.With(slog.Any("error", overrideUnitErr)).Error("Overriding service units failed") return stream.SendAndClose(&pb.UploadFilesResponse{ Status: pb.UploadFilesStatus_UPLOAD_FILES_START_FAILED, }) @@ -132,13 +133,13 @@ func (s *debugdServer) UploadFiles(stream pb.Debugd_UploadFilesServer) error { // DownloadFiles streams the previously received files to other instances. func (s *debugdServer) DownloadFiles(_ *pb.DownloadFilesRequest, stream pb.Debugd_DownloadFilesServer) error { - s.log.Infof("Sending files to other instance") + s.log.Info("Sending files to other instance") return s.transfer.SendFiles(stream) } // UploadSystemServiceUnits receives systemd service units, writes them to a service file and schedules a daemon-reload. func (s *debugdServer) UploadSystemServiceUnits(ctx context.Context, in *pb.UploadSystemdServiceUnitsRequest) (*pb.UploadSystemdServiceUnitsResponse, error) { - s.log.Infof("Uploading systemd service units") + s.log.Info("Uploading systemd service units") for _, unit := range in.Units { if err := s.serviceManager.WriteSystemdUnitFile(ctx, deploy.SystemdUnit{Name: unit.Name, Contents: unit.Contents}); err != nil { return &pb.UploadSystemdServiceUnitsResponse{Status: pb.UploadSystemdServiceUnitsStatus_UPLOAD_SYSTEMD_SERVICE_UNITS_FAILURE}, nil @@ -149,25 +150,26 @@ func (s *debugdServer) UploadSystemServiceUnits(ctx context.Context, in *pb.Uplo } // Start will start the gRPC server as goroutine. 
-func Start(log *logger.Logger, wg *sync.WaitGroup, serv pb.DebugdServer) { +func Start(log *slog.Logger, wg *sync.WaitGroup, serv pb.DebugdServer) { wg.Add(1) go func() { defer wg.Done() - grpcLog := log.Named("gRPC") - grpcLog.WithIncreasedLevel(zap.WarnLevel).ReplaceGRPCLogger() + grpcLog := log.WithGroup("gRPC") + logger.ReplaceGRPCLogger(slog.New(logger.NewLevelHandler(slog.LevelWarn, grpcLog.Handler()))) grpcServer := grpc.NewServer( - grpcLog.GetServerStreamInterceptor(), - grpcLog.GetServerUnaryInterceptor(), + logger.GetServerStreamInterceptor(grpcLog), + logger.GetServerUnaryInterceptor(grpcLog), grpc.KeepaliveParams(keepalive.ServerParameters{Time: 15 * time.Second}), ) pb.RegisterDebugdServer(grpcServer, serv) lis, err := net.Listen("tcp", net.JoinHostPort("0.0.0.0", strconv.Itoa(constants.DebugdPort))) if err != nil { - log.With(zap.Error(err)).Fatalf("Listening failed") + log.With(slog.Any("error", err)).Error("Listening failed") + os.Exit(1) } - log.Infof("gRPC server is waiting for connections") + log.Info("gRPC server is waiting for connections") grpcServer.Serve(lis) }() } diff --git a/debugd/internal/filetransfer/BUILD.bazel b/debugd/internal/filetransfer/BUILD.bazel index f59b0cb82f..db82609db2 100644 --- a/debugd/internal/filetransfer/BUILD.bazel +++ b/debugd/internal/filetransfer/BUILD.bazel @@ -13,8 +13,6 @@ go_library( "//debugd/internal/debugd", "//debugd/internal/filetransfer/streamer", "//debugd/service", - "//internal/logger", - "@org_uber_go_zap//:zap", ], ) diff --git a/debugd/internal/filetransfer/filetransfer.go b/debugd/internal/filetransfer/filetransfer.go index ff90bdf09b..04c784be19 100644 --- a/debugd/internal/filetransfer/filetransfer.go +++ b/debugd/internal/filetransfer/filetransfer.go @@ -10,16 +10,16 @@ package filetransfer import ( "errors" + "fmt" "io" "io/fs" + "log/slog" "sync" "sync/atomic" "github.com/edgelesssys/constellation/v2/debugd/internal/debugd" "github.com/edgelesssys/constellation/v2/debugd/internal/filetransfer/streamer" pb "github.com/edgelesssys/constellation/v2/debugd/service" - "github.com/edgelesssys/constellation/v2/internal/logger" - "go.uber.org/zap" ) // RecvFilesStream is a stream that receives FileTransferMessages. @@ -35,7 +35,7 @@ type SendFilesStream interface { // FileTransferer manages sending and receiving of files. type FileTransferer struct { fileMux sync.RWMutex - log *logger.Logger + log *slog.Logger receiveStarted bool receiveFinished atomic.Bool files []FileStat @@ -44,7 +44,7 @@ type FileTransferer struct { } // New creates a new FileTransferer. 
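The `Start` hunk above re-creates zap's `WithIncreasedLevel` by wrapping the gRPC logger's handler in `logger.NewLevelHandler`, which this patch adds elsewhere. Roughly, such a helper is a thin `slog.Handler` wrapper that raises the minimum level; the sketch below is an illustrative approximation under that assumption, not the patch's exact code:

```Go
package main

import (
	"context"
	"log/slog"
	"os"
)

// levelHandler wraps another slog.Handler and raises its minimum level.
type levelHandler struct {
	level   slog.Level
	handler slog.Handler
}

func (h *levelHandler) Enabled(_ context.Context, l slog.Level) bool {
	return l >= h.level
}

func (h *levelHandler) Handle(ctx context.Context, r slog.Record) error {
	return h.handler.Handle(ctx, r)
}

func (h *levelHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
	return &levelHandler{h.level, h.handler.WithAttrs(attrs)}
}

func (h *levelHandler) WithGroup(name string) slog.Handler {
	return &levelHandler{h.level, h.handler.WithGroup(name)}
}

func main() {
	base := slog.NewTextHandler(os.Stderr, nil)
	grpcLog := slog.New(&levelHandler{slog.LevelWarn, base})
	grpcLog.Info("suppressed")            // below LevelWarn, dropped
	grpcLog.Warn("gRPC connection reset") // passes through
}
```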
-func New(log *logger.Logger, streamer streamReadWriter, showProgress bool) *FileTransferer { +func New(log *slog.Logger, streamer streamReadWriter, showProgress bool) *FileTransferer { return &FileTransferer{ log: log, streamer: streamer, @@ -146,7 +146,7 @@ func (s *FileTransferer) handleFileRecv(stream RecvFilesStream) (bool, error) { if header == nil { return false, errors.New("first message must be a header message") } - s.log.Infof("Starting file receive of %q", header.TargetPath) + s.log.Info(fmt.Sprintf("Starting file receive of %q", header.TargetPath)) s.addFile(FileStat{ SourcePath: header.TargetPath, TargetPath: header.TargetPath, @@ -160,10 +160,10 @@ func (s *FileTransferer) handleFileRecv(stream RecvFilesStream) (bool, error) { }) recvChunkStream := &recvChunkStream{stream: stream} if err := s.streamer.WriteStream(header.TargetPath, recvChunkStream, s.showProgress); err != nil { - s.log.With(zap.Error(err)).Errorf("Receive of file %q failed", header.TargetPath) + s.log.With(slog.Any("error", err)).Error(fmt.Sprintf("Receive of file %q failed", header.TargetPath)) return false, err } - s.log.Infof("Finished file receive of %q", header.TargetPath) + s.log.Info(fmt.Sprintf("Finished file receive of %q", header.TargetPath)) return false, nil } diff --git a/dev-docs/conventions.md b/dev-docs/conventions.md index 26f8ba3fbd..02b08e1dbd 100644 --- a/dev-docs/conventions.md +++ b/dev-docs/conventions.md @@ -21,57 +21,67 @@ It is also recommended to use golangci-lint (and [gofumpt](https://github.com/mv ## Logging -We use a [custom subset](/internal/logger/) of [zap](https://pkg.go.dev/go.uber.org/zap) to provide logging for Constellation’s services and components. +We use [slog](https://pkg.go.dev/log/slog) for logging. Usage instructions can be found in the package documentation. -Certain components may further specify a subset of the logger for their use. For example, the CLI has a debug-only logger, restricting the use of the logger to only `Debugf()`. +Certain components may further specify a subset of the logger for their use. For example, the CLI has a debug-only logger, restricting the use of the logger to only `Debug()`. Further we try to adhere to the following guidelines: * Do not log potentially sensitive information, e.g. variables that contain keys, secrets or otherwise protected information. -* Start log messages in uppercase and end without a punctuation mark. Exclamation, question marks, or ellipsis may be used where appropriate. +* Create a text or JSON logger using the helper functions in the `logger` package. These create a `slog.Logger` with useful defaults. Example: ```Go - log.Infof("This is a log message") - log.Infof("Waiting to do something...") - log.Error("A critical error occurred!") + log := logger.NewTextLogger(slog.LevelDebug) + log.Debug("A debug message") ``` -* Use the `With()` method to add structured context to your log messages. The context tags should be easily searchable to allow for easy log filtering. Try to keep consistent tag naming! +* Start log messages in uppercase and end without a punctuation mark. Exclamation, question marks, or ellipsis may be used where appropriate. Example: ```Go - log.With(zap.Error(someError), zap.String("ip", "192.0.2.1")).Errorf("Connecting to IP failed") + log.Info("This is a log message") + log.Info("Waiting to do something...") + log.Error("A critical error occurred!") ``` -* Log messages may use format strings to produce human readable messages. 
However, the information should also be present as structured context fields if it might be valuable for debugging purposes. +* Use additional arguments to add structured context to your log messages. The context tags should be easily searchable to allow for easy log filtering. Try to keep consistent tag naming! Example: ```Go - log.Infof("Starting server on %s:%s", addr, port) + log.Error("Connecting to IP failed", "error", someError, "ip", "192.0.2.1") ``` -* Usage of the `Fatalf()` method should be constrained to the main package of an application only! +* Log messages may use format strings to produce human readable messages. However, the information should also be present as structured context fields if it might be valuable for debugging purposes. So, instead of writing + + ```Go + log.Info(fmt.Sprintf("Starting server on %s:%s", addr, port)) + ``` + + You should write + + ```Go + log.Info("Starting server", "addr", addr, "port", port) + ``` * Use log levels to configure how detailed the logs of your application should be. - * `Debugf()` for log low level and detailed information. This may include variable dumps, but should not disclose sensitive information, e.g. keys or secret tokens. - * `Infof()` for general information. - * `Warnf()` for information that may indicate unwanted behavior, but is not an application error. Commonly used by retry loops. - * `Errorf()` to log information about any errors that occurred. - * `Fatalf()` to log information about any errors that occurred and then exit the program. Should only be used in the main package of an application. + * `Debug()` for low-level and detailed information. This may include variable dumps, but should not disclose sensitive information, e.g. keys or secret tokens. + * `Info()` for general information. + * `Warn()` for information that may indicate unwanted behavior, but is not an application error. Commonly used by retry loops. + * `Error()` to log information about any errors that occurred. -* Loggers passed to subpackages of an application may use the `Named()` method for better understanding of where a message originated. +* Loggers passed to subpackages of an application may use the `WithGroup()` method for better understanding of where a message originated. Example: ```Go - grpcServer, err := server.New(log.Named("server")) + grpcServer, err := server.New(log.WithGroup("server")) ``` ## Nested Go modules diff --git a/disk-mapper/cmd/BUILD.bazel b/disk-mapper/cmd/BUILD.bazel index 1d3b1d565b..4db0736b3b 100644 --- a/disk-mapper/cmd/BUILD.bazel +++ b/disk-mapper/cmd/BUILD.bazel @@ -30,7 +30,6 @@ go_library( "//internal/logger", "//internal/role", "@com_github_spf13_afero//:afero", - "@org_uber_go_zap//:zap", ], ) diff --git a/disk-mapper/cmd/main.go b/disk-mapper/cmd/main.go index 415b1c56c6..56b1c1812a 100644 --- a/disk-mapper/cmd/main.go +++ b/disk-mapper/cmd/main.go @@ -9,7 +9,9 @@ package main import ( "context" "flag" + "fmt" "io" + "log/slog" "net" "os" "path/filepath" @@ -35,7 +37,6 @@ import ( "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/internal/role" "github.com/spf13/afero" - "go.uber.org/zap" ) const ( @@ -51,18 +52,20 @@ func main() { verbosity := flag.Int("v", 0, logger.CmdLineVerbosityDescription) flag.Parse() - log := logger.New(logger.JSONLog, logger.VerbosityFromInt(*verbosity)) - log.With(zap.String("version", constants.BinaryVersion().String()), zap.String("cloudProvider", *csp)).
- Infof("Starting disk-mapper") + log := logger.NewJSONLogger(logger.VerbosityFromInt(*verbosity)) + log.With(slog.String("version", constants.BinaryVersion().String()), slog.String("cloudProvider", *csp)). + Info("Starting disk-mapper") // set up quote issuer for aTLS connections attestVariant, err := variant.FromString(os.Getenv(constants.AttestationVariant)) if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to parse attestation variant") + log.With(slog.Any("error", err)).Error("Failed to parse attestation variant") + os.Exit(1) } issuer, err := choose.Issuer(attestVariant, log) if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to select issuer") + log.With(slog.Any("error", err)).Error("Failed to select issuer") + os.Exit(1) } // set up metadata API @@ -74,31 +77,37 @@ func main() { // using udev rules, a symlink for our disk is created at /dev/sdb diskPath, err = filepath.EvalSymlinks(awsStateDiskPath) if err != nil { - log.With(zap.Error(err)).Fatalf("Unable to resolve Azure state disk path") + log.With(slog.Any("error", err)).Error("Unable to resolve Azure state disk path") + os.Exit(1) } metadataClient, err = awscloud.New(context.Background()) if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to set up AWS metadata client") + log.With(slog.Any("error", err)).Error("Failed to set up AWS metadata client") + os.Exit(1) } case cloudprovider.Azure: diskPath, err = filepath.EvalSymlinks(azureStateDiskPath) if err != nil { - log.With(zap.Error(err)).Fatalf("Unable to resolve Azure state disk path") + log.With(slog.Any("error", err)).Error("Unable to resolve Azure state disk path") + os.Exit(1) } metadataClient, err = azurecloud.New(context.Background()) if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to set up Azure metadata client") + log.With(slog.Any("error", err)).Error("Failed to set up Azure metadata client") + os.Exit(1) } case cloudprovider.GCP: diskPath, err = filepath.EvalSymlinks(gcpStateDiskPath) if err != nil { - log.With(zap.Error(err)).Fatalf("Unable to resolve GCP state disk path") + log.With(slog.Any("error", err)).Error("Unable to resolve GCP state disk path") + os.Exit(1) } gcpMeta, err := gcpcloud.New(context.Background()) if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to create GCP metadata client") + log.With(slog.Any("error", err)).Error(("Failed to create GCP metadata client")) + os.Exit(1) } defer gcpMeta.Close() metadataClient = gcpMeta @@ -107,7 +116,8 @@ func main() { diskPath = openstackStateDiskPath metadataClient, err = openstack.New(context.Background()) if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to create OpenStack metadata client") + log.With(slog.Any("error", err)).Error(("Failed to create OpenStack metadata client")) + os.Exit(1) } case cloudprovider.QEMU: @@ -115,13 +125,15 @@ func main() { metadataClient = qemucloud.New() default: - log.Fatalf("CSP %s is not supported by Constellation", *csp) + log.Error(fmt.Sprintf("CSP %s is not supported by Constellation", *csp)) + os.Exit(1) } // initialize device mapper mapper, free, err := diskencryption.New(diskPath, log) if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to initialize device mapper") + log.With(slog.Any("error", err)).Error(("Failed to initialize device mapper")) + os.Exit(1) } defer free() @@ -133,7 +145,7 @@ func main() { } } setupManger := setup.New( - log.Named("setupManager"), + log.WithGroup("setupManager"), *csp, diskPath, afero.Afero{Fs: afero.NewOsFs()}, @@ -143,7 +155,8 @@ func main() { ) if err := 
setupManger.LogDevices(); err != nil { - log.With(zap.Error(err)).Fatalf("Failed to log devices") + log.With(slog.Any("error", err)).Error("Failed to log devices") + os.Exit(1) } // prepare the state disk @@ -152,21 +165,22 @@ func main() { var self metadata.InstanceMetadata self, err = metadataClient.Self(context.Background()) if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to get self metadata") + log.With(slog.Any("error", err)).Error("Failed to get self metadata") + os.Exit(1) } rejoinClient := rejoinclient.New( dialer.New(issuer, nil, &net.Dialer{}), self, metadataClient, - log.Named("rejoinClient"), + log.WithGroup("rejoinClient"), ) // set up recovery server if control-plane node var recoveryServer setup.RecoveryServer if self.Role == role.ControlPlane { - recoveryServer = recoveryserver.New(issuer, kmssetup.KMS, log.Named("recoveryServer")) + recoveryServer = recoveryserver.New(issuer, kmssetup.KMS, log.WithGroup("recoveryServer")) } else { - recoveryServer = recoveryserver.NewStub(log.Named("recoveryServer")) + recoveryServer = recoveryserver.NewStub(log.WithGroup("recoveryServer")) } err = setupManger.PrepareExistingDisk(setup.NewNodeRecoverer(recoveryServer, rejoinClient)) @@ -174,6 +188,7 @@ err = setupManger.PrepareNewDisk() } if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to prepare state disk") + log.With(slog.Any("error", err)).Error("Failed to prepare state disk") + os.Exit(1) } } diff --git a/disk-mapper/internal/diskencryption/BUILD.bazel b/disk-mapper/internal/diskencryption/BUILD.bazel index 84acaa6c18..452c846de7 100644 --- a/disk-mapper/internal/diskencryption/BUILD.bazel +++ b/disk-mapper/internal/diskencryption/BUILD.bazel @@ -5,9 +5,5 @@ go_library( srcs = ["diskencryption.go"], importpath = "github.com/edgelesssys/constellation/v2/disk-mapper/internal/diskencryption", visibility = ["//disk-mapper:__subpackages__"], - deps = [ - "//internal/cryptsetup", - "//internal/logger", - "@org_uber_go_zap//:zap", - ], + deps = ["//internal/cryptsetup"], ) diff --git a/disk-mapper/internal/diskencryption/diskencryption.go b/disk-mapper/internal/diskencryption/diskencryption.go index f6d25a6942..6bed71bfe9 100644 --- a/disk-mapper/internal/diskencryption/diskencryption.go +++ b/disk-mapper/internal/diskencryption/diskencryption.go @@ -15,22 +15,21 @@ package diskencryption import ( "fmt" + "log/slog" "time" "github.com/edgelesssys/constellation/v2/internal/cryptsetup" - "github.com/edgelesssys/constellation/v2/internal/logger" - "go.uber.org/zap" ) // DiskEncryption handles actions for formatting and mapping crypt devices. type DiskEncryption struct { device cryptDevice devicePath string - log *logger.Logger + log *slog.Logger } // New creates a new crypt device for the device at path.
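slog defines no Fatal level, which is why the disk-mapper hunks above expand each zap `Fatalf` into an `Error` call followed by `os.Exit(1)`. Where the repetition grates, a small helper can restore the one-liner; the sketch below is hypothetical and not part of this patch:

```Go
package main

import (
	"errors"
	"log/slog"
	"os"
)

// fatal logs at Error level and exits; a stand-in for zap's Fatalf.
// Note: os.Exit skips deferred functions, just as zap's Fatal did.
func fatal(log *slog.Logger, msg string, err error) {
	log.With(slog.Any("error", err)).Error(msg)
	os.Exit(1)
}

func main() {
	log := slog.New(slog.NewJSONHandler(os.Stderr, nil))
	if err := errors.New("no TPM device"); err != nil {
		fatal(log, "Failed to initialize device mapper", err)
	}
}
```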
-func New(path string, log *logger.Logger) (*DiskEncryption, func(), error) { +func New(path string, log *slog.Logger) (*DiskEncryption, func(), error) { device := cryptsetup.New() _, err := device.Init(path) if err != nil { @@ -101,7 +100,7 @@ func (d *DiskEncryption) UnmapDisk(target string) error { func (d *DiskEncryption) Wipe(blockWipeSize int) error { logProgress := func(size, offset uint64) { prog := (float64(offset) / float64(size)) * 100 - d.log.With(zap.String("progress", fmt.Sprintf("%.2f%%", prog))).Infof("Wiping disk") + d.log.With(slog.String("progress", fmt.Sprintf("%.2f%%", prog))).Info("Wiping disk") } start := time.Now() @@ -109,7 +108,7 @@ func (d *DiskEncryption) Wipe(blockWipeSize int) error { if err := d.device.Wipe("integrity", blockWipeSize, 0, logProgress, 30*time.Second); err != nil { return fmt.Errorf("wiping disk: %w", err) } - d.log.With(zap.Duration("duration", time.Since(start))).Infof("Wiping disk successful") + d.log.With(slog.Duration("duration", time.Since(start))).Info("Wiping disk successful") return nil } diff --git a/disk-mapper/internal/recoveryserver/BUILD.bazel b/disk-mapper/internal/recoveryserver/BUILD.bazel index ce81e8c39e..966b93afd8 100644 --- a/disk-mapper/internal/recoveryserver/BUILD.bazel +++ b/disk-mapper/internal/recoveryserver/BUILD.bazel @@ -17,7 +17,6 @@ go_library( "@org_golang_google_grpc//:go_default_library", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//status", - "@org_uber_go_zap//:zap", ], ) diff --git a/disk-mapper/internal/recoveryserver/recoveryserver.go b/disk-mapper/internal/recoveryserver/recoveryserver.go index d4db3b7870..5234d2e63f 100644 --- a/disk-mapper/internal/recoveryserver/recoveryserver.go +++ b/disk-mapper/internal/recoveryserver/recoveryserver.go @@ -17,6 +17,7 @@ package recoveryserver import ( "context" + "log/slog" "net" "sync" @@ -27,7 +28,6 @@ import ( "github.com/edgelesssys/constellation/v2/internal/grpc/grpclog" "github.com/edgelesssys/constellation/v2/internal/kms/kms" "github.com/edgelesssys/constellation/v2/internal/logger" - "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -45,13 +45,13 @@ type RecoveryServer struct { grpcServer server factory kmsFactory - log *logger.Logger + log *slog.Logger recoverproto.UnimplementedAPIServer } // New returns a new RecoveryServer. -func New(issuer atls.Issuer, factory kmsFactory, log *logger.Logger) *RecoveryServer { +func New(issuer atls.Issuer, factory kmsFactory, log *slog.Logger) *RecoveryServer { server := &RecoveryServer{ log: log, factory: factory, @@ -59,7 +59,7 @@ func New(issuer atls.Issuer, factory kmsFactory, log *logger.Logger) *RecoverySe grpcServer := grpc.NewServer( grpc.Creds(atlscredentials.New(issuer, nil)), - log.Named("gRPC").GetServerStreamInterceptor(), + logger.GetServerStreamInterceptor(log.WithGroup("gRPC")), ) recoverproto.RegisterAPIServer(grpcServer, server) @@ -72,7 +72,7 @@ func New(issuer atls.Issuer, factory kmsFactory, log *logger.Logger) *RecoverySe // The server will shut down when the call is successful and the keys are returned. // Additionally, the server can be shutdown by canceling the context. 
func (s *RecoveryServer) Serve(ctx context.Context, listener net.Listener, diskUUID string) (diskKey, measurementSecret []byte, err error) { - s.log.Infof("Starting RecoveryServer") + s.log.Info("Starting RecoveryServer") s.diskUUID = diskUUID recoveryDone := make(chan struct{}, 1) var serveErr error @@ -89,7 +89,7 @@ func (s *RecoveryServer) Serve(ctx context.Context, listener net.Listener, diskU for { select { case <-ctx.Done(): - s.log.Infof("Context canceled, shutting down server") + s.log.Info("Context canceled, shutting down server") s.grpcServer.GracefulStop() return nil, nil, ctx.Err() case <-recoveryDone: @@ -105,9 +105,9 @@ func (s *RecoveryServer) Serve(ctx context.Context, listener net.Listener, diskU func (s *RecoveryServer) Recover(ctx context.Context, req *recoverproto.RecoverMessage) (*recoverproto.RecoverResponse, error) { s.mux.Lock() defer s.mux.Unlock() - log := s.log.With(zap.String("peer", grpclog.PeerAddrFromContext(ctx))) + log := s.log.With(slog.String("peer", grpclog.PeerAddrFromContext(ctx))) - log.Infof("Received recover call") + log.Info("Received recover call") cloudKms, err := s.factory(ctx, req.StorageUri, req.KmsUri) if err != nil { @@ -124,7 +124,7 @@ func (s *RecoveryServer) Recover(ctx context.Context, req *recoverproto.RecoverM } s.stateDiskKey = stateDiskKey s.measurementSecret = measurementSecret - log.Infof("Received state disk key and measurement secret, shutting down server") + log.Info("Received state disk key and measurement secret, shutting down server") go s.grpcServer.GracefulStop() return &recoverproto.RecoverResponse{}, nil @@ -132,18 +132,18 @@ func (s *RecoveryServer) Recover(ctx context.Context, req *recoverproto.RecoverM // StubServer implements the RecoveryServer interface but does not actually start a server. type StubServer struct { - log *logger.Logger + log *slog.Logger } // NewStub returns a new stubbed RecoveryServer. // We use this to avoid having to start a server for worker nodes, since they don't require manual recovery. -func NewStub(log *logger.Logger) *StubServer { +func NewStub(log *slog.Logger) *StubServer { return &StubServer{log: log} } // Serve waits until the context is canceled and returns nil. 
func (s *StubServer) Serve(ctx context.Context, _ net.Listener, _ string) ([]byte, []byte, error) { - s.log.Infof("Running as worker node, skipping recovery server") + s.log.Info("Running as worker node, skipping recovery server") <-ctx.Done() return nil, nil, ctx.Err() } diff --git a/disk-mapper/internal/rejoinclient/BUILD.bazel b/disk-mapper/internal/rejoinclient/BUILD.bazel index b7bae0f1cf..01f0c26e5e 100644 --- a/disk-mapper/internal/rejoinclient/BUILD.bazel +++ b/disk-mapper/internal/rejoinclient/BUILD.bazel @@ -9,12 +9,10 @@ go_library( deps = [ "//internal/cloud/metadata", "//internal/constants", - "//internal/logger", "//internal/role", "//joinservice/joinproto", "@io_k8s_utils//clock", "@org_golang_google_grpc//:go_default_library", - "@org_uber_go_zap//:zap", ], ) diff --git a/disk-mapper/internal/rejoinclient/rejoinclient.go b/disk-mapper/internal/rejoinclient/rejoinclient.go index 4f50b0b418..bedb01535d 100644 --- a/disk-mapper/internal/rejoinclient/rejoinclient.go +++ b/disk-mapper/internal/rejoinclient/rejoinclient.go @@ -15,16 +15,15 @@ import ( "context" "errors" "fmt" + "log/slog" "net" "strconv" "time" "github.com/edgelesssys/constellation/v2/internal/cloud/metadata" "github.com/edgelesssys/constellation/v2/internal/constants" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/internal/role" "github.com/edgelesssys/constellation/v2/joinservice/joinproto" - "go.uber.org/zap" "google.golang.org/grpc" "k8s.io/utils/clock" ) @@ -47,12 +46,12 @@ type RejoinClient struct { dialer grpcDialer metadataAPI metadataAPI - log *logger.Logger + log *slog.Logger } // New returns a new RejoinClient. func New(dial grpcDialer, nodeInfo metadata.InstanceMetadata, - meta metadataAPI, log *logger.Logger, + meta metadataAPI, log *slog.Logger, ) *RejoinClient { return &RejoinClient{ nodeInfo: nodeInfo, @@ -70,22 +69,22 @@ func New(dial grpcDialer, nodeInfo metadata.InstanceMetadata, // from the metadata API and send rejoin requests to them. // The function returns after a successful rejoin request has been performed. func (c *RejoinClient) Start(ctx context.Context, diskUUID string) (diskKey, measurementSecret []byte) { - c.log.Infof("Starting RejoinClient") + c.log.Info("Starting RejoinClient") c.diskUUID = diskUUID ticker := c.clock.NewTicker(c.interval) defer ticker.Stop() - defer c.log.Infof("RejoinClient stopped") + defer c.log.Info("RejoinClient stopped") for { endpoints, err := c.getJoinEndpoints() if err != nil { - c.log.With(zap.Error(err)).Errorf("Failed to get control-plane endpoints") + c.log.With(slog.Any("error", err)).Error("Failed to get control-plane endpoints") } else { - c.log.With(zap.Strings("endpoints", endpoints)).Infof("Received list with JoinService endpoints") + c.log.With(slog.Any("endpoints", endpoints)).Info("Received list with JoinService endpoints") diskKey, measurementSecret, err = c.tryRejoinWithAvailableServices(ctx, endpoints) if err == nil { - c.log.Infof("Successfully retrieved rejoin ticket") + c.log.Info("Successfully retrieved rejoin ticket") return diskKey, measurementSecret } } @@ -101,12 +100,12 @@ func (c *RejoinClient) Start(ctx context.Context, diskUUID string) (diskKey, mea // tryRejoinWithAvailableServices tries sending rejoin requests to the available endpoints. 
func (c *RejoinClient) tryRejoinWithAvailableServices(ctx context.Context, endpoints []string) (diskKey, measurementSecret []byte, err error) { for _, endpoint := range endpoints { - c.log.With(zap.String("endpoint", endpoint)).Infof("Requesting rejoin ticket") + c.log.With(slog.String("endpoint", endpoint)).Info("Requesting rejoin ticket") rejoinTicket, err := c.requestRejoinTicket(endpoint) if err == nil { return rejoinTicket.StateDiskKey, rejoinTicket.MeasurementSecret, nil } - c.log.With(zap.Error(err), zap.String("endpoint", endpoint)).Warnf("Failed to rejoin on endpoint") + c.log.With(slog.Any("error", err), slog.String("endpoint", endpoint)).Warn("Failed to rejoin on endpoint") // stop requesting additional endpoints if the context is done select { @@ -115,7 +114,7 @@ func (c *RejoinClient) tryRejoinWithAvailableServices(ctx context.Context, endpo default: } } - c.log.Errorf("Failed to rejoin on all endpoints") + c.log.Error("Failed to rejoin on all endpoints") return nil, nil, errors.New("failed to join on all endpoints") } diff --git a/disk-mapper/internal/setup/BUILD.bazel b/disk-mapper/internal/setup/BUILD.bazel index 035d57304c..6250ff87db 100644 --- a/disk-mapper/internal/setup/BUILD.bazel +++ b/disk-mapper/internal/setup/BUILD.bazel @@ -20,10 +20,8 @@ go_library( "//internal/constants", "//internal/crypto", "//internal/file", - "//internal/logger", "//internal/nodestate", "@com_github_spf13_afero//:afero", - "@org_uber_go_zap//:zap", ], ) diff --git a/disk-mapper/internal/setup/setup.go b/disk-mapper/internal/setup/setup.go index 3128807138..38b5914512 100644 --- a/disk-mapper/internal/setup/setup.go +++ b/disk-mapper/internal/setup/setup.go @@ -17,6 +17,7 @@ import ( "errors" "fmt" "io/fs" + "log/slog" "net" "os" "path/filepath" @@ -31,10 +32,8 @@ import ( "github.com/edgelesssys/constellation/v2/internal/constants" "github.com/edgelesssys/constellation/v2/internal/crypto" "github.com/edgelesssys/constellation/v2/internal/file" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/internal/nodestate" "github.com/spf13/afero" - "go.uber.org/zap" ) const ( @@ -49,7 +48,7 @@ const ( // Manager handles formatting, mapping, mounting and unmounting of state disks. type Manager struct { - log *logger.Logger + log *slog.Logger csp string diskPath string fs afero.Afero @@ -60,7 +59,7 @@ type Manager struct { } // New initializes a SetupManager with the given parameters. -func New(log *logger.Logger, csp string, diskPath string, fs afero.Afero, +func New(log *slog.Logger, csp string, diskPath string, fs afero.Afero, mapper DeviceMapper, mounter Mounter, openDevice vtpm.TPMOpenFunc, ) *Manager { return &Manager{ @@ -82,7 +81,7 @@ func (s *Manager) PrepareExistingDisk(recover RecoveryDoer) error { if err != nil { return err } - s.log.With(zap.String("uuid", uuid)).Infof("Preparing existing state disk") + s.log.With(slog.String("uuid", uuid)).Info("Preparing existing state disk") endpoint := net.JoinHostPort("0.0.0.0", strconv.Itoa(constants.RecoveryPort)) passphrase, measurementSecret, err := recover.Do(uuid, endpoint) @@ -128,7 +127,7 @@ func (s *Manager) PrepareExistingDisk(recover RecoveryDoer) error { // PrepareNewDisk prepares an instances state disk by formatting the disk as a LUKS device using a random passphrase. 
func (s *Manager) PrepareNewDisk() error { uuid, _ := s.mapper.DiskUUID() - s.log.With(zap.String("uuid", uuid)).Infof("Preparing new state disk") + s.log.With(slog.String("uuid", uuid)).Info("Preparing new state disk") // generate and save temporary passphrase passphrase := make([]byte, crypto.RNGLengthDefault) @@ -192,12 +191,12 @@ func (s *Manager) LogDevices() error { devices = append(devices, fileInfo) } - s.log.Infof("List of all available block devices and partitions:") + s.log.Info("List of all available block devices and partitions:") for _, device := range devices { var stat syscall.Statfs_t dev := "/dev/" + device.Name() if err := syscall.Statfs(dev, &stat); err != nil { - s.log.With(zap.Error(err)).Errorf("failed to statfs %s", dev) + s.log.With(slog.Any("error", err)).Error(fmt.Sprintf("failed to statfs %s", dev)) continue } @@ -206,7 +205,7 @@ func (s *Manager) LogDevices() error { free := stat.Bfree * uint64(stat.Bsize) avail := stat.Bavail * uint64(stat.Bsize) - s.log.Infof( + s.log.Info(fmt.Sprintf( "Name: %-15s, Size: %-10d, Mode: %s, ModTime: %s, Size = %-10d, Free = %-10d, Available = %-10d\n", dev, device.Size(), @@ -214,7 +213,7 @@ func (s *Manager) LogDevices() error { device.ModTime(), size, free, - avail) + avail)) } return nil } diff --git a/disk-mapper/internal/test/BUILD.bazel b/disk-mapper/internal/test/BUILD.bazel index 489a2b89f2..8d39f1a031 100644 --- a/disk-mapper/internal/test/BUILD.bazel +++ b/disk-mapper/internal/test/BUILD.bazel @@ -31,7 +31,6 @@ go_test( "@com_github_stretchr_testify//require", "@io_bazel_rules_go//go/runfiles:go_default_library", "@org_uber_go_goleak//:goleak", - "@org_uber_go_zap//zapcore", ], "@io_bazel_rules_go//go/platform:linux": [ "//disk-mapper/internal/diskencryption", @@ -42,7 +41,6 @@ go_test( "@com_github_stretchr_testify//require", "@io_bazel_rules_go//go/runfiles:go_default_library", "@org_uber_go_goleak//:goleak", - "@org_uber_go_zap//zapcore", ], "//conditions:default": [], }), diff --git a/disk-mapper/internal/test/benchmark_test.go b/disk-mapper/internal/test/benchmark_test.go index 6fc92a2846..8581fb8a39 100644 --- a/disk-mapper/internal/test/benchmark_test.go +++ b/disk-mapper/internal/test/benchmark_test.go @@ -10,13 +10,13 @@ package integration import ( "fmt" + "log/slog" "math" "testing" "github.com/edgelesssys/constellation/v2/disk-mapper/internal/diskencryption" "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/martinjungblut/go-cryptsetup" - "go.uber.org/zap/zapcore" ) func BenchmarkMapper(b *testing.B) { @@ -39,7 +39,7 @@ func BenchmarkMapper(b *testing.B) { } passphrase := "benchmark" - mapper, free, err := diskencryption.New(testPath, logger.New(logger.PlainLog, zapcore.InfoLevel)) + mapper, free, err := diskencryption.New(testPath, logger.NewTextLogger(slog.LevelInfo)) if err != nil { b.Fatal("Failed to create mapper:", err) } diff --git a/disk-mapper/internal/test/integration_test.go b/disk-mapper/internal/test/integration_test.go index a22faa28e7..cc865c256b 100644 --- a/disk-mapper/internal/test/integration_test.go +++ b/disk-mapper/internal/test/integration_test.go @@ -12,6 +12,7 @@ import ( "encoding/json" "flag" "fmt" + "log/slog" "os" "os/exec" "path/filepath" @@ -103,7 +104,7 @@ func TestMapper(t *testing.T) { require.NoError(setup(1), "failed to setup test disk") defer func() { require.NoError(teardown(), "failed to delete test disk") }() - mapper, free, err := diskencryption.New(devicePath, logger.NewTest(t)) + mapper, free, err := diskencryption.New(devicePath, 
logger.NewTextLogger(slog.LevelInfo)) require.NoError(err, "failed to initialize crypt device") defer free() diff --git a/e2e/internal/upgrade/helm.go b/e2e/internal/upgrade/helm.go index b0fc498fdc..18b48a13cf 100644 --- a/e2e/internal/upgrade/helm.go +++ b/e2e/internal/upgrade/helm.go @@ -25,7 +25,7 @@ func servicesVersion(t *testing.T) (semver.Semver, error) { settings := cli.New() settings.KubeConfig = "constellation-admin.conf" actionConfig := &action.Configuration{} - if err := actionConfig.Init(settings.RESTClientGetter(), constants.HelmNamespace, "secret", log.Infof); err != nil { + if err := actionConfig.Init(settings.RESTClientGetter(), constants.HelmNamespace, "secret", log.Info); err != nil { return semver.Semver{}, fmt.Errorf("initializing config: %w", err) } diff --git a/e2e/malicious-join/BUILD.bazel b/e2e/malicious-join/BUILD.bazel index 3f78a37a84..6004b41bbe 100644 --- a/e2e/malicious-join/BUILD.bazel +++ b/e2e/malicious-join/BUILD.bazel @@ -16,8 +16,6 @@ go_library( "//internal/grpc/dialer", "//internal/logger", "//joinservice/joinproto", - "@org_uber_go_zap//:zap", - "@org_uber_go_zap//zapcore", ], ) diff --git a/e2e/malicious-join/malicious-join.go b/e2e/malicious-join/malicious-join.go index a8894d0231..9810358800 100644 --- a/e2e/malicious-join/malicious-join.go +++ b/e2e/malicious-join/malicious-join.go @@ -12,7 +12,9 @@ import ( "encoding/json" "flag" "fmt" + "log/slog" "net" + "os" "strings" "github.com/edgelesssys/constellation/v2/internal/attestation/variant" @@ -20,13 +22,10 @@ import ( "github.com/edgelesssys/constellation/v2/internal/grpc/dialer" "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/joinservice/joinproto" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" ) func main() { - log := logger.New(logger.JSONLog, zapcore.DebugLevel) - defer log.Sync() + log := logger.NewJSONLogger(slog.LevelDebug) jsEndpoint := flag.String("js-endpoint", "", "Join service endpoint to use.") csp := flag.String("csp", "", "Cloud service provider to use.") @@ -38,13 +37,13 @@ func main() { ) flag.Parse() log.With( - zap.String("js-endpoint", *jsEndpoint), - zap.String("csp", *csp), - zap.String("variant", *attVariant), - ).Infof("Running tests with flags") + slog.String("js-endpoint", *jsEndpoint), + slog.String("csp", *csp), + slog.String("variant", *attVariant), + ).Info("Running tests with flags") testCases := map[string]struct { - fn func(attVariant, csp, jsEndpoint string, log *logger.Logger) error + fn func(attVariant, csp, jsEndpoint string, log *slog.Logger) error wantErr bool }{ "JoinFromUnattestedNode": { @@ -58,44 +57,45 @@ func main() { TestCases: make(map[string]testCaseOutput), } for name, tc := range testCases { - log.With(zap.String("testcase", name)).Infof("Running testcase") + log.With(slog.String("testcase", name)).Info("Running testcase") err := tc.fn(*attVariant, *csp, *jsEndpoint, log) switch { case err == nil && tc.wantErr: - log.With(zap.Error(err), zap.String("testcase", name)).Errorf("Test case failed: Expected error but got none") + log.With(slog.Any("error", err), slog.String("testcase", name)).Error("Test case failed: Expected error but got none") testOutput.TestCases[name] = testCaseOutput{ Passed: false, Message: "Expected error but got none", } allPassed = false case !tc.wantErr && err != nil: - log.With(zap.Error(err), zap.String("testcase", name)).Errorf("Test case failed: Got unexpected error") + log.With(slog.Any("error", err), slog.String("testcase", name)).Error("Test case failed: Got 
unexpected error") testOutput.TestCases[name] = testCaseOutput{ Passed: false, Message: fmt.Sprintf("Got unexpected error: %s", err), } allPassed = false case tc.wantErr && err != nil: - log.With(zap.String("testcase", name)).Infof("Test case succeeded") + log.With(slog.String("testcase", name)).Info("Test case succeeded") testOutput.TestCases[name] = testCaseOutput{ Passed: true, Message: fmt.Sprintf("Got expected error: %s", err), } case !tc.wantErr && err == nil: - log.With(zap.String("testcase", name)).Infof("Test case succeeded") + log.With(slog.String("testcase", name)).Info("Test case succeeded") testOutput.TestCases[name] = testCaseOutput{ Passed: true, Message: "No error, as expected", } default: - log.With(zap.String("testcase", name)).Fatalf("invalid result") + log.With(slog.String("testcase", name)).Error("invalid result") + os.Exit(1) } } testOutput.AllPassed = allPassed - log.With(zap.Any("result", testOutput)).Infof("Test completed") + log.With(slog.Any("result", testOutput)).Info("Test completed") } type testOutput struct { @@ -110,7 +110,7 @@ type testCaseOutput struct { // JoinFromUnattestedNode simulates a join request from a Node that uses a stub issuer // and thus cannot be attested correctly. -func JoinFromUnattestedNode(attVariant, csp, jsEndpoint string, log *logger.Logger) error { +func JoinFromUnattestedNode(attVariant, csp, jsEndpoint string, log *slog.Logger) error { joiner, err := newMaliciousJoiner(attVariant, csp, jsEndpoint, log) if err != nil { return fmt.Errorf("creating malicious joiner: %w", err) @@ -125,7 +125,7 @@ func JoinFromUnattestedNode(attVariant, csp, jsEndpoint string, log *logger.Logg // newMaliciousJoiner creates a new malicious joiner, i.e. a simulated node that issues // an invalid join request. -func newMaliciousJoiner(attVariant, csp, endpoint string, log *logger.Logger) (*maliciousJoiner, error) { +func newMaliciousJoiner(attVariant, csp, endpoint string, log *slog.Logger) (*maliciousJoiner, error) { var attVariantOid variant.Variant var err error if strings.EqualFold(attVariant, "default") { @@ -149,30 +149,30 @@ func newMaliciousJoiner(attVariant, csp, endpoint string, log *logger.Logger) (* // maliciousJoiner simulates a malicious node joining a cluster. type maliciousJoiner struct { endpoint string - logger *logger.Logger + logger *slog.Logger dialer *dialer.Dialer } // join issues a join request to the join service endpoint. 
func (j *maliciousJoiner) join(ctx context.Context) (*joinproto.IssueJoinTicketResponse, error) { - j.logger.Debugf("Dialing join service endpoint %s", j.endpoint) + j.logger.Debug(fmt.Sprintf("Dialing join service endpoint %s", j.endpoint)) conn, err := j.dialer.Dial(ctx, j.endpoint) if err != nil { return nil, fmt.Errorf("dialing join service endpoint: %w", err) } defer conn.Close() - j.logger.Debugf("Successfully dialed join service endpoint %s", j.endpoint) + j.logger.Debug(fmt.Sprintf("Successfully dialed join service endpoint %s", j.endpoint)) protoClient := joinproto.NewAPIClient(conn) - j.logger.Debugf("Issuing join ticket") + j.logger.Debug("Issuing join ticket") req := &joinproto.IssueJoinTicketRequest{ DiskUuid: "", CertificateRequest: []byte{}, IsControlPlane: false, } res, err := protoClient.IssueJoinTicket(ctx, req) - j.logger.Debugf("Got join ticket response: %+v", res) + j.logger.Debug(fmt.Sprintf("Got join ticket response: %+v", res)) if err != nil { return nil, fmt.Errorf("issuing join ticket: %w", err) } diff --git a/go.mod b/go.mod index f0d26c4fd0..e25a088559 100644 --- a/go.mod +++ b/go.mod @@ -126,7 +126,6 @@ require ( go.etcd.io/etcd/client/pkg/v3 v3.5.10 go.etcd.io/etcd/client/v3 v3.5.10 go.uber.org/goleak v1.3.0 - go.uber.org/zap v1.26.0 golang.org/x/crypto v0.17.0 golang.org/x/exp v0.0.0-20231219180239-dc181d75b848 golang.org/x/mod v0.14.0 @@ -153,6 +152,8 @@ require ( sigs.k8s.io/yaml v1.4.0 ) +require go.uber.org/zap v1.26.0 // indirect + require ( cloud.google.com/go v0.110.8 // indirect cloud.google.com/go/iam v1.1.2 // indirect diff --git a/hack/bazel-deps-mirror/BUILD.bazel b/hack/bazel-deps-mirror/BUILD.bazel index 0ccbc3e61d..c4880cc83b 100644 --- a/hack/bazel-deps-mirror/BUILD.bazel +++ b/hack/bazel-deps-mirror/BUILD.bazel @@ -18,7 +18,6 @@ go_library( "//internal/logger", "@com_github_bazelbuild_buildtools//build", "@com_github_spf13_cobra//:cobra", - "@org_uber_go_zap//zapcore", ], ) diff --git a/hack/bazel-deps-mirror/check.go b/hack/bazel-deps-mirror/check.go index c8089ff737..64bd68b738 100644 --- a/hack/bazel-deps-mirror/check.go +++ b/hack/bazel-deps-mirror/check.go @@ -9,6 +9,8 @@ package main import ( "context" "errors" + "fmt" + "log/slog" "github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/bazelfiles" "github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/issues" @@ -16,7 +18,6 @@ import ( "github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/rules" "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/spf13/cobra" - "go.uber.org/zap/zapcore" ) func newCheckCmd() *cobra.Command { @@ -38,15 +39,15 @@ func runCheck(cmd *cobra.Command, _ []string) error { if err != nil { return err } - log := logger.New(logger.PlainLog, flags.logLevel) - log.Debugf("Parsed flags: %+v", flags) + log := logger.NewTextLogger(flags.logLevel) + log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) filesHelper, err := bazelfiles.New() if err != nil { return err } - log.Debugf("Searching for Bazel files in the current WORKSPACE and all subdirectories...") + log.Debug("Searching for Bazel files in the current WORKSPACE and all subdirectories...") bazelFiles, err := filesHelper.FindFiles() if err != nil { return err @@ -55,10 +56,10 @@ func runCheck(cmd *cobra.Command, _ []string) error { var mirrorCheck mirrorChecker switch { case flags.mirrorUnauthenticated: - log.Debugf("Checking consistency of all referenced CAS objects without authentication.") + log.Debug("Checking consistency of all referenced 
CAS objects without authentication.") mirrorCheck = mirror.NewUnauthenticated(flags.mirrorBaseURL, mirror.Run, log) case flags.mirror: - log.Debugf("Checking consistency of all referenced CAS objects using AWS S3.") + log.Debug("Checking consistency of all referenced CAS objects using AWS S3.") mirrorCheck, err = mirror.New(cmd.Context(), flags.region, flags.bucket, flags.mirrorBaseURL, mirror.Run, log) if err != nil { return err @@ -78,17 +79,17 @@ func runCheck(cmd *cobra.Command, _ []string) error { } } if len(iss) > 0 { - log.Infof("Found issues in rules") + log.Info("Found issues in rules") iss.Report(cmd.OutOrStdout()) return errors.New("found issues in rules") } - log.Infof("No issues found 🦭") + log.Info("No issues found 🦭") return nil } -func checkBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorCheck mirrorChecker, bazelFile bazelfiles.BazelFile, log *logger.Logger) (issByFile issues.ByFile, err error) { - log.Debugf("Checking file: %s", bazelFile.RelPath) +func checkBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorCheck mirrorChecker, bazelFile bazelfiles.BazelFile, log *slog.Logger) (issByFile issues.ByFile, err error) { + log.Debug(fmt.Sprintf("Checking file: %s", bazelFile.RelPath)) issByFile = issues.NewByFile() buildfile, err := fileHelper.LoadFile(bazelFile) if err != nil { @@ -96,12 +97,12 @@ func checkBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorCh } found := rules.Rules(buildfile, rules.SupportedRules) if len(found) == 0 { - log.Debugf("No rules found in file: %s", bazelFile.RelPath) + log.Debug(fmt.Sprintf("No rules found in file: %s", bazelFile.RelPath)) return issByFile, nil } - log.Debugf("Found %d rules in file: %s", len(found), bazelFile.RelPath) + log.Debug(fmt.Sprintf("Found %d rules in file: %s", len(found), bazelFile.RelPath)) for _, rule := range found { - log.Debugf("Checking rule: %s", rule.Name()) + log.Debug(fmt.Sprintf("Checking rule: %s", rule.Name())) // check if the rule is a valid pinned dependency rule (has all required attributes) if issues := rules.ValidatePinned(rule); len(issues) > 0 { issByFile.Add(rule.Name(), issues...) 
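The conversion pattern in this command (and in the sibling commands below) is the same throughout the patch. As a minimal, hedged sketch of that pattern (the standalone main, the example attribute values, and the error are illustrative only; logger.NewTextLogger is the constructor this patch introduces in internal/logger, everything else is the standard log/slog API):

package main

import (
	"fmt"
	"log/slog"
	"os"

	"github.com/edgelesssys/constellation/v2/internal/logger"
)

func main() {
	// Old: log := logger.New(logger.PlainLog, zapcore.DebugLevel)
	log := logger.NewTextLogger(slog.LevelDebug)

	// Old: log.Debugf("Parsed flags: %+v", flags)
	// slog has no printf-style methods, so formatted messages are wrapped in fmt.Sprintf.
	log.Debug(fmt.Sprintf("Parsed flags: %+v", os.Args))

	// Old: log.With(zap.String("csp", "aws"), zap.Error(err)).Errorf("Request failed")
	// zap fields become slog attributes; zap.Error(err) becomes slog.Any("error", err).
	err := fmt.Errorf("example error")
	log.With(slog.String("csp", "aws"), slog.Any("error", err)).Error("Request failed")

	// Old: log.Fatalf("unrecoverable")
	// slog has no Fatal level, so callers log at Error and exit explicitly.
	log.Error("unrecoverable")
	os.Exit(1)
}

Note that slog.Logger.With returns a copy of the logger, so attributes added per test case or per file (as in the loops in this patch) do not accumulate on the shared logger.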
@@ -130,7 +131,7 @@ type checkFlags struct { region string bucket string mirrorBaseURL string - logLevel zapcore.Level + logLevel slog.Level } func parseCheckFlags(cmd *cobra.Command) (checkFlags, error) { @@ -146,9 +147,9 @@ func parseCheckFlags(cmd *cobra.Command) (checkFlags, error) { if err != nil { return checkFlags{}, err } - logLevel := zapcore.InfoLevel + logLevel := slog.LevelInfo if verbose { - logLevel = zapcore.DebugLevel + logLevel = slog.LevelDebug } region, err := cmd.Flags().GetString("region") if err != nil { diff --git a/hack/bazel-deps-mirror/fix.go b/hack/bazel-deps-mirror/fix.go index c40d42d055..9a327ee277 100644 --- a/hack/bazel-deps-mirror/fix.go +++ b/hack/bazel-deps-mirror/fix.go @@ -9,6 +9,8 @@ package main import ( "context" "errors" + "fmt" + "log/slog" "github.com/bazelbuild/buildtools/build" "github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/bazelfiles" @@ -17,7 +19,6 @@ import ( "github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/rules" "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/spf13/cobra" - "go.uber.org/zap/zapcore" ) func newFixCmd() *cobra.Command { @@ -38,15 +39,15 @@ func runFix(cmd *cobra.Command, _ []string) error { if err != nil { return err } - log := logger.New(logger.PlainLog, flags.logLevel) - log.Debugf("Parsed flags: %+v", flags) + log := logger.NewTextLogger(flags.logLevel) + log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) fileHelper, err := bazelfiles.New() if err != nil { return err } - log.Debugf("Searching for Bazel files in the current WORKSPACE and all subdirectories...") + log.Debug("Searching for Bazel files in the current WORKSPACE and all subdirectories...") bazelFiles, err := fileHelper.FindFiles() if err != nil { return err @@ -55,10 +56,10 @@ func runFix(cmd *cobra.Command, _ []string) error { var mirrorUpload mirrorUploader switch { case flags.unauthenticated: - log.Warnf("Fixing rules without authentication for AWS S3. If artifacts are not yet mirrored, this will fail.") + log.Warn("Fixing rules without authentication for AWS S3. 
If artifacts are not yet mirrored, this will fail.") mirrorUpload = mirror.NewUnauthenticated(flags.mirrorBaseURL, flags.dryRun, log) default: - log.Debugf("Fixing rules with authentication for AWS S3.") + log.Debug("Fixing rules with authentication for AWS S3.") mirrorUpload, err = mirror.New(cmd.Context(), flags.region, flags.bucket, flags.mirrorBaseURL, flags.dryRun, log) if err != nil { return err @@ -76,29 +77,29 @@ func runFix(cmd *cobra.Command, _ []string) error { } } if len(issues) > 0 { - log.Warnf("Found %d unfixable issues in rules", len(issues)) + log.Warn(fmt.Sprintf("Found %d unfixable issues in rules", len(issues))) issues.Report(cmd.OutOrStdout()) return errors.New("found issues in rules") } - log.Infof("No unfixable issues found") + log.Info("No unfixable issues found") return nil } -func fixBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUpload mirrorUploader, bazelFile bazelfiles.BazelFile, dryRun bool, log *logger.Logger) (iss issues.ByFile, err error) { +func fixBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUpload mirrorUploader, bazelFile bazelfiles.BazelFile, dryRun bool, log *slog.Logger) (iss issues.ByFile, err error) { iss = issues.NewByFile() var changed bool // true if any rule in this file was changed - log.Infof("Checking file: %s", bazelFile.RelPath) + log.Info(fmt.Sprintf("Checking file: %s", bazelFile.RelPath)) buildfile, err := fileHelper.LoadFile(bazelFile) if err != nil { return iss, err } found := rules.Rules(buildfile, rules.SupportedRules) if len(found) == 0 { - log.Debugf("No rules found in file: %s", bazelFile.RelPath) + log.Debug(fmt.Sprintf("No rules found in file: %s", bazelFile.RelPath)) return iss, nil } - log.Debugf("Found %d rules in file: %s", len(found), bazelFile.RelPath) + log.Debug(fmt.Sprintf("Found %d rules in file: %s", len(found), bazelFile.RelPath)) for _, rule := range found { changedRule, ruleIssues := fixRule(ctx, mirrorUpload, rule, log) if len(ruleIssues) > 0 { @@ -108,11 +109,11 @@ func fixBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUplo } if len(iss) > 0 { - log.Warnf("File %s has issues. Not saving!", bazelFile.RelPath) + log.Warn(fmt.Sprintf("File %s has issues. 
Not saving!", bazelFile.RelPath)) return iss, nil } if !changed { - log.Debugf("No changes to file: %s", bazelFile.RelPath) + log.Debug(fmt.Sprintf("No changes to file: %s", bazelFile.RelPath)) return iss, nil } if dryRun { @@ -120,10 +121,10 @@ func fixBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUplo if err != nil { return iss, err } - log.Infof("Dry run: would save updated file %s with diff:\n%s", bazelFile.RelPath, diff) + log.Info(fmt.Sprintf("Dry run: would save updated file %s with diff:\n%s", bazelFile.RelPath, diff)) return iss, nil } - log.Infof("Saving updated file: %s", bazelFile.RelPath) + log.Info(fmt.Sprintf("Saving updated file: %s", bazelFile.RelPath)) if err := fileHelper.WriteFile(bazelFile, buildfile); err != nil { return iss, err } @@ -131,7 +132,7 @@ func fixBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUplo return iss, nil } -func learnHashForRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, log *logger.Logger) error { +func learnHashForRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, log *slog.Logger) error { upstreamURLs, err := rules.UpstreamURLs(rule) if err != nil { return err @@ -141,12 +142,12 @@ func learnHashForRule(ctx context.Context, mirrorUpload mirrorUploader, rule *bu return err } rules.SetHash(rule, learnedHash) - log.Debugf("Learned hash for rule %s: %s", rule.Name(), learnedHash) + log.Debug(fmt.Sprintf("Learned hash for rule %s: %s", rule.Name(), learnedHash)) return nil } -func fixRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, log *logger.Logger) (changed bool, iss []error) { - log.Debugf("Fixing rule: %s", rule.Name()) +func fixRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, log *slog.Logger) (changed bool, iss []error) { + log.Debug(fmt.Sprintf("Fixing rule: %s", rule.Name())) // try to learn the hash if hash, err := rules.GetHash(rule); err != nil || hash == "" { @@ -182,14 +183,14 @@ func fixRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, } if checkErr := mirrorUpload.Check(ctx, expectedHash); checkErr != nil { - log.Infof("Artifact %s with hash %s is not yet mirrored. Uploading...", rule.Name(), expectedHash) + log.Info(fmt.Sprintf("Artifact %s with hash %s is not yet mirrored. Uploading...", rule.Name(), expectedHash)) if uploadErr := mirrorUpload.Mirror(ctx, expectedHash, rules.GetURLs(rule)); uploadErr != nil { // don't try to fix the rule if the upload failed iss = append(iss, uploadErr) return changed, iss } } else { - log.Infof("Artifact %s with hash %s was already uploaded before. Adding to rule...", rule.Name(), expectedHash) + log.Info(fmt.Sprintf("Artifact %s with hash %s was already uploaded before. 
Adding to rule...", expectedHash)) } // now the artifact is mirrored (if it wasn't already) and we can fix the rule @@ -211,7 +212,7 @@ type fixFlags struct { region string bucket string mirrorBaseURL string - logLevel zapcore.Level + logLevel slog.Level } func parseFixFlags(cmd *cobra.Command) (fixFlags, error) { @@ -227,9 +228,9 @@ func parseFixFlags(cmd *cobra.Command) (fixFlags, error) { if err != nil { return fixFlags{}, err } - logLevel := zapcore.InfoLevel + logLevel := slog.LevelInfo if verbose { - logLevel = zapcore.DebugLevel + logLevel = slog.LevelDebug } region, err := cmd.Flags().GetString("region") if err != nil { diff --git a/hack/bazel-deps-mirror/internal/mirror/BUILD.bazel b/hack/bazel-deps-mirror/internal/mirror/BUILD.bazel index 88e4e4c9ee..5e4b25479a 100644 --- a/hack/bazel-deps-mirror/internal/mirror/BUILD.bazel +++ b/hack/bazel-deps-mirror/internal/mirror/BUILD.bazel @@ -7,7 +7,6 @@ go_library( importpath = "github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/mirror", visibility = ["//hack/bazel-deps-mirror:__subpackages__"], deps = [ - "//internal/logger", "@com_github_aws_aws_sdk_go_v2_config//:config", "@com_github_aws_aws_sdk_go_v2_feature_s3_manager//:manager", "@com_github_aws_aws_sdk_go_v2_service_s3//:s3", diff --git a/hack/bazel-deps-mirror/internal/mirror/mirror.go b/hack/bazel-deps-mirror/internal/mirror/mirror.go index a4f27c4638..1593cc2988 100644 --- a/hack/bazel-deps-mirror/internal/mirror/mirror.go +++ b/hack/bazel-deps-mirror/internal/mirror/mirror.go @@ -15,6 +15,7 @@ import ( "errors" "fmt" "io" + "log/slog" "net/http" "net/url" "path" @@ -23,7 +24,6 @@ import ( s3manager "github.com/aws/aws-sdk-go-v2/feature/s3/manager" "github.com/aws/aws-sdk-go-v2/service/s3" s3types "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/edgelesssys/constellation/v2/internal/logger" ) // Maintainer can upload and download files to and from a CAS mirror. @@ -39,11 +39,11 @@ type Maintainer struct { unauthenticated bool dryRun bool - log *logger.Logger + log *slog.Logger } // NewUnauthenticated creates a new Maintainer that does not require authentication and can only download files from a CAS mirror. -func NewUnauthenticated(mirrorBaseURL string, dryRun bool, log *logger.Logger) *Maintainer { +func NewUnauthenticated(mirrorBaseURL string, dryRun bool, log *slog.Logger) *Maintainer { return &Maintainer{ httpClient: http.DefaultClient, mirrorBaseURL: mirrorBaseURL, @@ -54,7 +54,7 @@ func NewUnauthenticated(mirrorBaseURL string, dryRun bool, log *logger.Logger) * } // New creates a new Maintainer that can upload and download files to and from a CAS mirror. 
-func New(ctx context.Context, region, bucket, mirrorBaseURL string, dryRun bool, log *logger.Logger) (*Maintainer, error) { +func New(ctx context.Context, region, bucket, mirrorBaseURL string, dryRun bool, log *slog.Logger) (*Maintainer, error) { cfg, err := awsconfig.LoadDefaultConfig(ctx, awsconfig.WithRegion(region)) if err != nil { return nil, err } @@ -95,17 +95,17 @@ func (m *Maintainer) Mirror(ctx context.Context, hash string, urls []string) err } for _, url := range urls { - m.log.Debugf("Mirroring file with hash %v from %q", hash, url) + m.log.Debug(fmt.Sprintf("Mirroring file with hash %v from %q", hash, url)) body, err := m.downloadFromUpstream(ctx, url) if err != nil { - m.log.Debugf("Failed to download file from %q: %v", url, err) + m.log.Debug(fmt.Sprintf("Failed to download file from %q: %v", url, err)) continue } defer body.Close() streamedHash := sha256.New() tee := io.TeeReader(body, streamedHash) if err := m.put(ctx, hash, tee); err != nil { - m.log.Warnf("Failed to stream file from upstream %q to mirror: %v.. Trying next url.", url, err) + m.log.Warn(fmt.Sprintf("Failed to stream file from upstream %q to mirror: %v. Trying next url.", url, err)) continue } actualHash := hex.EncodeToString(streamedHash.Sum(nil)) @@ -117,7 +117,7 @@ func (m *Maintainer) Mirror(ctx context.Context, hash string, urls []string) err if err != nil { return err } - m.log.Debugf("File uploaded successfully to mirror from %q as %q", url, pubURL) + m.log.Debug(fmt.Sprintf("File uploaded successfully to mirror from %q as %q", url, pubURL)) return nil } return fmt.Errorf("failed to download / reupload file with hash %v from any of the urls: %v", hash, urls) @@ -126,19 +126,19 @@ func (m *Maintainer) Mirror(ctx context.Context, hash string, urls []string) err // Learn downloads a file from one of the existing (non-mirror) urls, hashes it and returns the hash. func (m *Maintainer) Learn(ctx context.Context, urls []string) (string, error) { for _, url := range urls { - m.log.Debugf("Learning new hash from %q", url) + m.log.Debug(fmt.Sprintf("Learning new hash from %q", url)) body, err := m.downloadFromUpstream(ctx, url) if err != nil { - m.log.Debugf("Failed to download file from %q: %v", url, err) + m.log.Debug(fmt.Sprintf("Failed to download file from %q: %v", url, err)) continue } defer body.Close() streamedHash := sha256.New() if _, err := io.Copy(streamedHash, body); err != nil { - m.log.Debugf("Failed to stream file from %q: %v", url, err) + m.log.Debug(fmt.Sprintf("Failed to stream file from %q: %v", url, err)) } learnedHash := hex.EncodeToString(streamedHash.Sum(nil)) - m.log.Debugf("File successfully downloaded from %q with %q", url, learnedHash) + m.log.Debug(fmt.Sprintf("File successfully downloaded from %q with %q", url, learnedHash)) return learnedHash, nil } return "", fmt.Errorf("failed to download file / learn hash from any of the urls: %v", urls) @@ -146,7 +146,7 @@ func (m *Maintainer) Learn(ctx context.Context, urls []string) (string, error) { // Check checks if a file is present and has the correct hash in the CAS mirror. 
func (m *Maintainer) Check(ctx context.Context, expectedHash string) error { - m.log.Debugf("Checking consistency of object with hash %v", expectedHash) + m.log.Debug(fmt.Sprintf("Checking consistency of object with hash %v", expectedHash)) if m.unauthenticated { return m.checkUnauthenticated(ctx, expectedHash) } @@ -157,7 +157,7 @@ func (m *Maintainer) Check(ctx context.Context, expectedHash string) error { // It uses the authenticated CAS s3 endpoint to download the file metadata. func (m *Maintainer) checkAuthenticated(ctx context.Context, expectedHash string) error { key := path.Join(keyBase, expectedHash) - m.log.Debugf("Check: s3 getObjectAttributes {Bucket: %v, Key: %v}", m.bucket, key) + m.log.Debug(fmt.Sprintf("Check: s3 getObjectAttributes {Bucket: %v, Key: %v}", m.bucket, key)) attributes, err := m.objectStorageClient.GetObjectAttributes(ctx, &s3.GetObjectAttributesInput{ Bucket: &m.bucket, Key: &key, @@ -174,7 +174,7 @@ func (m *Maintainer) checkAuthenticated(ctx context.Context, expectedHash string // checksums are not guaranteed to be present // and if present, they are only meaningful for single part objects // fallback if checksum cannot be verified from attributes - m.log.Debugf("S3 object attributes cannot be used to verify key %v. Falling back to download.", key) + m.log.Debug(fmt.Sprintf("S3 object attributes cannot be used to verify key %v. Falling back to download.", key)) return m.checkUnauthenticated(ctx, expectedHash) } @@ -192,7 +192,7 @@ func (m *Maintainer) checkUnauthenticated(ctx context.Context, expectedHash stri if err != nil { return err } - m.log.Debugf("Check: http get {Url: %v}", pubURL) + m.log.Debug(fmt.Sprintf("Check: http get {Url: %v}", pubURL)) req, err := http.NewRequestWithContext(ctx, http.MethodGet, pubURL, http.NoBody) if err != nil { return err @@ -221,10 +221,10 @@ func (m *Maintainer) put(ctx context.Context, hash string, data io.Reader) error key := path.Join(keyBase, hash) if m.dryRun { - m.log.Debugf("DryRun: s3 put object {Bucket: %v, Key: %v}", m.bucket, key) + m.log.Debug(fmt.Sprintf("DryRun: s3 put object {Bucket: %v, Key: %v}", m.bucket, key)) return nil } - m.log.Debugf("Uploading object with hash %v to s3://%v/%v", hash, m.bucket, key) + m.log.Debug(fmt.Sprintf("Uploading object with hash %v to s3://%v/%v", hash, m.bucket, key)) _, err := m.uploadClient.Upload(ctx, &s3.PutObjectInput{ Bucket: &m.bucket, Key: &key, diff --git a/hack/bazel-deps-mirror/upgrade.go b/hack/bazel-deps-mirror/upgrade.go index 8e2af75f03..e2c07d5c26 100644 --- a/hack/bazel-deps-mirror/upgrade.go +++ b/hack/bazel-deps-mirror/upgrade.go @@ -9,6 +9,8 @@ package main import ( "context" "errors" + "fmt" + "log/slog" "github.com/bazelbuild/buildtools/build" "github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/bazelfiles" @@ -17,7 +19,6 @@ import ( "github.com/edgelesssys/constellation/v2/hack/bazel-deps-mirror/internal/rules" "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/spf13/cobra" - "go.uber.org/zap/zapcore" ) func newUpgradeCmd() *cobra.Command { @@ -38,15 +39,15 @@ func runUpgrade(cmd *cobra.Command, _ []string) error { if err != nil { return err } - log := logger.New(logger.PlainLog, flags.logLevel) - log.Debugf("Parsed flags: %+v", flags) + log := logger.NewTextLogger(flags.logLevel) + log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) fileHelper, err := bazelfiles.New() if err != nil { return err } - log.Debugf("Searching for Bazel files in the current WORKSPACE and all subdirectories...") + 
log.Debug("Searching for Bazel files in the current WORKSPACE and all subdirectories...") bazelFiles, err := fileHelper.FindFiles() if err != nil { return err @@ -55,10 +56,10 @@ func runUpgrade(cmd *cobra.Command, _ []string) error { var mirrorUpload mirrorUploader switch { case flags.unauthenticated: - log.Warnf("Upgrading rules without authentication for AWS S3. If artifacts are not yet mirrored, this will fail.") + log.Warn("Upgrading rules without authentication for AWS S3. If artifacts are not yet mirrored, this will fail.") mirrorUpload = mirror.NewUnauthenticated(flags.mirrorBaseURL, flags.dryRun, log) default: - log.Debugf("Upgrading rules with authentication for AWS S3.") + log.Debug("Upgrading rules with authentication for AWS S3.") mirrorUpload, err = mirror.New(cmd.Context(), flags.region, flags.bucket, flags.mirrorBaseURL, flags.dryRun, log) if err != nil { return err @@ -76,29 +77,29 @@ func runUpgrade(cmd *cobra.Command, _ []string) error { } } if len(issues) > 0 { - log.Warnf("Found %d issues in rules", len(issues)) + log.Warn(fmt.Sprintf("Found %d issues in rules", len(issues))) issues.Report(cmd.OutOrStdout()) return errors.New("found issues in rules") } - log.Infof("No issues found") + log.Info("No issues found") return nil } -func upgradeBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUpload mirrorUploader, bazelFile bazelfiles.BazelFile, dryRun bool, log *logger.Logger) (iss issues.ByFile, err error) { +func upgradeBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUpload mirrorUploader, bazelFile bazelfiles.BazelFile, dryRun bool, log *slog.Logger) (iss issues.ByFile, err error) { iss = issues.NewByFile() var changed bool // true if any rule in this file was changed - log.Infof("Checking file: %s", bazelFile.RelPath) + log.Info(fmt.Sprintf("Checking file: %s", bazelFile.RelPath)) buildfile, err := fileHelper.LoadFile(bazelFile) if err != nil { return iss, err } found := rules.Rules(buildfile, rules.SupportedRules) if len(found) == 0 { - log.Debugf("No rules found in file: %s", bazelFile.RelPath) + log.Debug(fmt.Sprintf("No rules found in file: %s", bazelFile.RelPath)) return iss, nil } - log.Debugf("Found %d rules in file: %s", len(found), bazelFile.RelPath) + log.Debug(fmt.Sprintf("Found %d rules in file: %s", len(found), bazelFile.RelPath)) for _, rule := range found { changedRule, ruleIssues := upgradeRule(ctx, mirrorUpload, rule, log) if len(ruleIssues) > 0 { @@ -108,11 +109,11 @@ func upgradeBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirror } if len(iss) > 0 { - log.Warnf("File %s has issues. Not saving!", bazelFile.RelPath) + log.Warn(fmt.Sprintf("File %s has issues. 
Not saving!", bazelFile.RelPath)) return iss, nil } if !changed { - log.Debugf("No changes to file: %s", bazelFile.RelPath) + log.Debug(fmt.Sprintf("No changes to file: %s", bazelFile.RelPath)) return iss, nil } if dryRun { @@ -120,10 +121,10 @@ func upgradeBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirror if err != nil { return iss, err } - log.Infof("Dry run: would save updated file %s with diff:\n%s", bazelFile.RelPath, diff) + log.Info(fmt.Sprintf("Dry run: would save updated file %s with diff:\n%s", bazelFile.RelPath, diff)) return iss, nil } - log.Infof("Saving updated file: %s", bazelFile.RelPath) + log.Info(fmt.Sprintf("Saving updated file: %s", bazelFile.RelPath)) if err := fileHelper.WriteFile(bazelFile, buildfile); err != nil { return iss, err } @@ -131,12 +132,12 @@ func upgradeBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirror return iss, nil } -func upgradeRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, log *logger.Logger) (changed bool, iss []error) { - log.Debugf("Upgrading rule: %s", rule.Name()) +func upgradeRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, log *slog.Logger) (changed bool, iss []error) { + log.Debug(fmt.Sprintf("Upgrading rule: %s", rule.Name())) upstreamURLs, err := rules.UpstreamURLs(rule) if errors.Is(err, rules.ErrNoUpstreamURL) { - log.Debugf("Rule has no upstream URL. Skipping.") + log.Debug("Rule has no upstream URL. Skipping.") return false, nil } else if err != nil { iss = append(iss, err) @@ -152,7 +153,7 @@ func upgradeRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.R existingHash, err := rules.GetHash(rule) if err == nil && learnedHash == existingHash { - log.Debugf("Rule already upgraded. Skipping.") + log.Debug("Rule already upgraded. 
Skipping.") return false, nil } @@ -177,7 +178,7 @@ type upgradeFlags struct { region string bucket string mirrorBaseURL string - logLevel zapcore.Level + logLevel slog.Level } func parseUpgradeFlags(cmd *cobra.Command) (upgradeFlags, error) { @@ -193,9 +194,9 @@ func parseUpgradeFlags(cmd *cobra.Command) (upgradeFlags, error) { if err != nil { return upgradeFlags{}, err } - logLevel := zapcore.InfoLevel + logLevel := slog.LevelInfo if verbose { - logLevel = zapcore.DebugLevel + logLevel = slog.LevelDebug } region, err := cmd.Flags().GetString("region") if err != nil { diff --git a/hack/cli-k8s-compatibility/BUILD.bazel b/hack/cli-k8s-compatibility/BUILD.bazel index d31d56b7de..4c94d507ef 100644 --- a/hack/cli-k8s-compatibility/BUILD.bazel +++ b/hack/cli-k8s-compatibility/BUILD.bazel @@ -10,7 +10,6 @@ go_library( "//internal/constants", "//internal/logger", "//internal/versions", - "@org_uber_go_zap//zapcore", ], ) diff --git a/hack/cli-k8s-compatibility/main.go b/hack/cli-k8s-compatibility/main.go index 616c6fd984..5361fcdf4f 100644 --- a/hack/cli-k8s-compatibility/main.go +++ b/hack/cli-k8s-compatibility/main.go @@ -10,12 +10,14 @@ package main import ( "context" "flag" + "fmt" + "log/slog" + "os" "github.com/edgelesssys/constellation/v2/internal/api/versionsapi" "github.com/edgelesssys/constellation/v2/internal/constants" "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/internal/versions" - "go.uber.org/zap/zapcore" ) var ( @@ -25,18 +27,21 @@ var ( ) func main() { - log := logger.New(logger.PlainLog, zapcore.DebugLevel) + log := logger.NewTextLogger(slog.LevelDebug) ctx := context.Background() flag.Parse() if *refFlag == "" { - log.Fatalf("ref must be set") + log.Error("ref must be set") + os.Exit(1) } if *streamFlag == "" { - log.Fatalf("stream must be set") + log.Error("stream must be set") + os.Exit(1) } if *versionFlag == "" { - log.Fatalf("version must be set") + log.Error("version must be set") + os.Exit(1) } cliInfo := versionsapi.CLIInfo{ @@ -52,15 +57,18 @@ func main() { c, cclose, err := versionsapi.NewClient(ctx, "eu-central-1", "cdn-constellation-backend", constants.CDNDefaultDistributionID, false, log) if err != nil { - log.Fatalf("creating s3 client: %w", err) + log.Error(fmt.Sprintf("creating s3 client: %s", err)) + os.Exit(1) } defer func() { if err := cclose(ctx); err != nil { - log.Fatalf("invalidating cache: %w", err) + log.Error(fmt.Sprintf("invalidating cache: %s", err)) + os.Exit(1) } }() if err := c.UpdateCLIInfo(ctx, cliInfo); err != nil { - log.Fatalf("updating cli info: %w", err) + log.Error(fmt.Sprintf("updating cli info: %s", err)) + os.Exit(1) } } diff --git a/hack/oci-pin/BUILD.bazel b/hack/oci-pin/BUILD.bazel index 9a3603791d..15217ba6fa 100644 --- a/hack/oci-pin/BUILD.bazel +++ b/hack/oci-pin/BUILD.bazel @@ -16,7 +16,6 @@ go_library( "//hack/oci-pin/internal/sums", "//internal/logger", "@com_github_spf13_cobra//:cobra", - "@org_uber_go_zap//zapcore", ], ) diff --git a/hack/oci-pin/codegen.go b/hack/oci-pin/codegen.go index 4c8f9fafc7..910056ed06 100644 --- a/hack/oci-pin/codegen.go +++ b/hack/oci-pin/codegen.go @@ -8,6 +8,7 @@ package main import ( "fmt" "io" + "log/slog" "os" "path/filepath" "strings" @@ -16,7 +17,6 @@ import ( "github.com/edgelesssys/constellation/v2/hack/oci-pin/internal/inject" "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/spf13/cobra" - "go.uber.org/zap/zapcore" ) func newCodegenCmd() *cobra.Command { @@ -44,15 +44,15 @@ func runCodegen(cmd *cobra.Command, _ 
[]string) error { if err != nil { return err } - log := logger.New(logger.PlainLog, flags.logLevel) - log.Debugf("Parsed flags: %+v", flags) + log := logger.NewTextLogger(flags.logLevel) + log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) registry, prefix, name, tag, err := splitRepoTag(flags.imageRepoTag) if err != nil { return fmt.Errorf("splitting OCI image reference %q: %w", flags.imageRepoTag, err) } - log.Debugf("Generating Go code for OCI image %s.", name) + log.Debug(fmt.Sprintf("Generating Go code for OCI image %s.", name)) ociIndexPath := filepath.Join(flags.ociPath, "index.json") index, err := os.Open(ociIndexPath) @@ -78,7 +78,7 @@ func runCodegen(cmd *cobra.Command, _ []string) error { return err } - log.Debugf("OCI image digest: %s", digest) + log.Debug(fmt.Sprintf("OCI image digest: %s", digest)) if err := inject.Render(out, inject.PinningValues{ Package: flags.pkg, @@ -92,7 +92,7 @@ func runCodegen(cmd *cobra.Command, _ []string) error { return fmt.Errorf("rendering Go code: %w", err) } - log.Debugf("Go code created at %q 🤖", flags.output) + log.Debug(fmt.Sprintf("Go code created at %q 🤖", flags.output)) return nil } @@ -102,7 +102,7 @@ type codegenFlags struct { pkg string identifier string imageRepoTag string - logLevel zapcore.Level + logLevel slog.Level } func parseCodegenFlags(cmd *cobra.Command) (codegenFlags, error) { @@ -137,9 +137,9 @@ func parseCodegenFlags(cmd *cobra.Command) (codegenFlags, error) { if err != nil { return codegenFlags{}, err } - logLevel := zapcore.InfoLevel + logLevel := slog.LevelInfo if verbose { - logLevel = zapcore.DebugLevel + logLevel = slog.LevelDebug } return codegenFlags{ diff --git a/hack/oci-pin/merge.go b/hack/oci-pin/merge.go index 081ea6a6f2..94bafd52bf 100644 --- a/hack/oci-pin/merge.go +++ b/hack/oci-pin/merge.go @@ -8,12 +8,12 @@ package main import ( "fmt" "io" + "log/slog" "os" "github.com/edgelesssys/constellation/v2/hack/oci-pin/internal/sums" "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/spf13/cobra" - "go.uber.org/zap/zapcore" ) func newMergeCmd() *cobra.Command { @@ -35,10 +35,10 @@ func runMerge(cmd *cobra.Command, _ []string) error { if err != nil { return err } - log := logger.New(logger.PlainLog, flags.logLevel) - log.Debugf("Parsed flags: %+v", flags) + log := logger.NewTextLogger(flags.logLevel) + log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) - log.Debugf("Merging sum file from %q into %q.", flags.inputs, flags.output) + log.Debug(fmt.Sprintf("Merging sum file from %q into %q.", flags.inputs, flags.output)) var out io.Writer if flags.output == "-" { @@ -61,7 +61,7 @@ func runMerge(cmd *cobra.Command, _ []string) error { return fmt.Errorf("creating merged sum file: %w", err) } - log.Debugf("Sum file created at %q 🤖", flags.output) + log.Debug(fmt.Sprintf("Sum file created at %q 🤖", flags.output)) return nil } @@ -93,7 +93,7 @@ func parseInput(input string) ([]sums.PinnedImageReference, error) { type mergeFlags struct { inputs []string output string - logLevel zapcore.Level + logLevel slog.Level } func parseMergeFlags(cmd *cobra.Command) (mergeFlags, error) { @@ -109,9 +109,9 @@ func parseMergeFlags(cmd *cobra.Command) (mergeFlags, error) { if err != nil { return mergeFlags{}, err } - logLevel := zapcore.InfoLevel + logLevel := slog.LevelInfo if verbose { - logLevel = zapcore.DebugLevel + logLevel = slog.LevelDebug } return mergeFlags{ diff --git a/hack/oci-pin/sum.go b/hack/oci-pin/sum.go index 2268ae3587..d6be5154ae 100644 --- a/hack/oci-pin/sum.go +++ b/hack/oci-pin/sum.go @@ -8,6 +8,7 
@@ package main import ( "fmt" "io" + "log/slog" "os" "path/filepath" "strings" @@ -16,7 +17,6 @@ import ( "github.com/edgelesssys/constellation/v2/hack/oci-pin/internal/sums" "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/spf13/cobra" - "go.uber.org/zap/zapcore" ) func newSumCmd() *cobra.Command { @@ -41,15 +41,15 @@ func runSum(cmd *cobra.Command, _ []string) error { if err != nil { return err } - log := logger.New(logger.PlainLog, flags.logLevel) - log.Debugf("Parsed flags: %+v", flags) + log := logger.NewTextLogger(flags.logLevel) + log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) registry, prefix, name, tag, err := splitRepoTag(flags.imageRepoTag) if err != nil { return fmt.Errorf("splitting repo tag: %w", err) } - log.Debugf("Generating sum file for OCI image %s.", name) + log.Debug(fmt.Sprintf("Generating sum file for OCI image %s.", name)) ociIndexPath := filepath.Join(flags.ociPath, "index.json") index, err := os.Open(ociIndexPath) @@ -75,7 +75,7 @@ func runSum(cmd *cobra.Command, _ []string) error { return fmt.Errorf("extracting OCI image digest: %w", err) } - log.Debugf("OCI image digest: %s", digest) + log.Debug(fmt.Sprintf("OCI image digest: %s", digest)) refs := []sums.PinnedImageReference{ { @@ -91,7 +91,7 @@ func runSum(cmd *cobra.Command, _ []string) error { return fmt.Errorf("creating sum file: %w", err) } - log.Debugf("Sum file created at %q 🤖", flags.output) + log.Debug(fmt.Sprintf("Sum file created at %q 🤖", flags.output)) return nil } @@ -99,7 +99,7 @@ type sumFlags struct { ociPath string output string imageRepoTag string - logLevel zapcore.Level + logLevel slog.Level } func parseSumFlags(cmd *cobra.Command) (sumFlags, error) { @@ -126,9 +126,9 @@ func parseSumFlags(cmd *cobra.Command) (sumFlags, error) { if err != nil { return sumFlags{}, err } - logLevel := zapcore.InfoLevel + logLevel := slog.LevelInfo if verbose { - logLevel = zapcore.DebugLevel + logLevel = slog.LevelDebug } return sumFlags{ diff --git a/hack/qemu-metadata-api/BUILD.bazel b/hack/qemu-metadata-api/BUILD.bazel index 60e4f80ab4..67811771cf 100644 --- a/hack/qemu-metadata-api/BUILD.bazel +++ b/hack/qemu-metadata-api/BUILD.bazel @@ -17,8 +17,6 @@ go_library( deps = [ "//hack/qemu-metadata-api/virtwrapper", "//internal/logger", "@org_libvirt_go_libvirt//:libvirt", - "@org_uber_go_zap//:zap", - "@org_uber_go_zap//zapcore", ], ) diff --git a/hack/qemu-metadata-api/main.go b/hack/qemu-metadata-api/main.go index bec42e2c7c..dd5be683c7 100644 --- a/hack/qemu-metadata-api/main.go +++ b/hack/qemu-metadata-api/main.go @@ -10,12 +10,12 @@ package main import ( "flag" + "log/slog" + "os" "github.com/edgelesssys/constellation/v2/hack/qemu-metadata-api/server" "github.com/edgelesssys/constellation/v2/hack/qemu-metadata-api/virtwrapper" "github.com/edgelesssys/constellation/v2/internal/logger" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" "libvirt.org/go/libvirt" ) @@ -26,16 +26,18 @@ func main() { initSecretHash := flag.String("initsecrethash", "", "bcrypt hash of the init secret") flag.Parse() - log := logger.New(logger.JSONLog, zapcore.InfoLevel) + log := logger.NewJSONLogger(slog.LevelInfo) conn, err := libvirt.NewConnect(*libvirtURI) if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to connect to libvirt") + log.With(slog.Any("error", err)).Error("Failed to connect to libvirt") + os.Exit(1) } defer conn.Close() serv := server.New(log, *targetNetwork, *initSecretHash, &virtwrapper.Connect{Conn: conn}) if err := serv.ListenAndServe(*bindPort); err != nil { - 
log.With(zap.Error(err)).Fatalf("Failed to serve") + log.With(slog.Any("error", err)).Error("Failed to serve") + os.Exit(1) } } diff --git a/hack/qemu-metadata-api/server/BUILD.bazel b/hack/qemu-metadata-api/server/BUILD.bazel index 9fe06769b9..5def36c034 100644 --- a/hack/qemu-metadata-api/server/BUILD.bazel +++ b/hack/qemu-metadata-api/server/BUILD.bazel @@ -12,9 +12,7 @@ go_library( deps = [ "//hack/qemu-metadata-api/virtwrapper", "//internal/cloud/metadata", - "//internal/logger", "//internal/role", - "@org_uber_go_zap//:zap", ], ) diff --git a/hack/qemu-metadata-api/server/server.go b/hack/qemu-metadata-api/server/server.go index 93d6f3a044..4f0cad9e97 100644 --- a/hack/qemu-metadata-api/server/server.go +++ b/hack/qemu-metadata-api/server/server.go @@ -9,27 +9,26 @@ package server import ( "encoding/json" "fmt" + "log/slog" "net" "net/http" "strings" "github.com/edgelesssys/constellation/v2/hack/qemu-metadata-api/virtwrapper" "github.com/edgelesssys/constellation/v2/internal/cloud/metadata" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/internal/role" - "go.uber.org/zap" ) // Server that provides QEMU metadata. type Server struct { - log *logger.Logger + log *slog.Logger virt virConnect network string initSecretHashVal []byte } // New creates a new Server. -func New(log *logger.Logger, network, initSecretHash string, conn virConnect) *Server { +func New(log *slog.Logger, network, initSecretHash string, conn virConnect) *Server { return &Server{ log: log, virt: conn, @@ -55,25 +54,25 @@ func (s *Server) ListenAndServe(port string) error { return err } - s.log.Infof("Starting QEMU metadata API on %s", lis.Addr()) + s.log.Info(fmt.Sprintf("Starting QEMU metadata API on %s", lis.Addr())) return server.Serve(lis) } // listSelf returns peer information about the instance issuing the request. func (s *Server) listSelf(w http.ResponseWriter, r *http.Request) { - log := s.log.With(zap.String("peer", r.RemoteAddr)) - log.Infof("Serving GET request for /self") + log := s.log.With(slog.String("peer", r.RemoteAddr)) + log.Info("Serving GET request for /self") remoteIP, _, err := net.SplitHostPort(r.RemoteAddr) if err != nil { - log.With(zap.Error(err)).Errorf("Failed to parse remote address") + log.With(slog.Any("error", err)).Error("Failed to parse remote address") http.Error(w, fmt.Sprintf("Failed to parse remote address: %s\n", err), http.StatusInternalServerError) return } peers, err := s.listAll() if err != nil { - log.With(zap.Error(err)).Errorf("Failed to list peer metadata") + log.With(slog.Any("error", err)).Error("Failed to list peer metadata") http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -85,23 +84,23 @@ func (s *Server) listSelf(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusInternalServerError) return } - log.Infof("Request successful") + log.Info("Request successful") return } } - log.Errorf("Failed to find peer in active leases") + log.Error("Failed to find peer in active leases") http.Error(w, "No matching peer found", http.StatusNotFound) } // listPeers returns a list of all active peers. 
func (s *Server) listPeers(w http.ResponseWriter, r *http.Request) { - log := s.log.With(zap.String("peer", r.RemoteAddr)) - log.Infof("Serving GET request for /peers") + log := s.log.With(slog.String("peer", r.RemoteAddr)) + log.Info("Serving GET request for /peers") peers, err := s.listAll() if err != nil { - log.With(zap.Error(err)).Errorf("Failed to list peer metadata") + log.With(slog.Any("error", err)).Error("Failed to list peer metadata") http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -111,38 +110,38 @@ func (s *Server) listPeers(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusInternalServerError) return } - log.Infof("Request successful") + log.Info("Request successful") } // initSecretHash returns the hash of the init secret. func (s *Server) initSecretHash(w http.ResponseWriter, r *http.Request) { - log := s.log.With(zap.String("initSecretHash", r.RemoteAddr)) + log := s.log.With(slog.String("initSecretHash", r.RemoteAddr)) if r.Method != http.MethodGet { - log.With(zap.String("method", r.Method)).Errorf("Invalid method for /initSecretHash") + log.With(slog.String("method", r.Method)).Error("Invalid method for /initSecretHash") http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) return } - log.Infof("Serving GET request for /initsecrethash") + log.Info("Serving GET request for /initsecrethash") w.Header().Set("Content-Type", "text/plain") _, err := w.Write(s.initSecretHashVal) if err != nil { - log.With(zap.Error(err)).Errorf("Failed to write init secret hash") + log.With(slog.Any("error", err)).Error("Failed to write init secret hash") http.Error(w, err.Error(), http.StatusInternalServerError) return } - log.Infof("Request successful") + log.Info("Request successful") } // getEndpoint returns the IP address of the first control-plane instance. // This allows us to fake a load balancer for QEMU instances. 
func (s *Server) getEndpoint(w http.ResponseWriter, r *http.Request) { - log := s.log.With(zap.String("peer", r.RemoteAddr)) - log.Infof("Serving GET request for /endpoint") + log := s.log.With(slog.String("peer", r.RemoteAddr)) + log.Info("Serving GET request for /endpoint") net, err := s.virt.LookupNetworkByName(s.network) if err != nil { - log.With(zap.Error(err)).Errorf("Failed to lookup network") + log.With(slog.Any("error", err)).Error("Failed to lookup network") http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -150,7 +149,7 @@ func (s *Server) getEndpoint(w http.ResponseWriter, r *http.Request) { leases, err := net.GetDHCPLeases() if err != nil { - log.With(zap.Error(err)).Errorf("Failed to get DHCP leases") + log.With(slog.Any("error", err)).Error("Failed to get DHCP leases") http.Error(w, err.Error(), http.StatusInternalServerError) } @@ -162,12 +161,12 @@ func (s *Server) getEndpoint(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusInternalServerError) return } - log.Infof("Request successful") + log.Info("Request successful") return } } - log.Errorf("Failed to find control-plane peer in active leases") + log.Error("Failed to find control-plane peer in active leases") http.Error(w, "No matching peer found", http.StatusNotFound) } diff --git a/image/upload/internal/cmd/BUILD.bazel b/image/upload/internal/cmd/BUILD.bazel index 1bff3aa8e6..ac78c4c2ef 100644 --- a/image/upload/internal/cmd/BUILD.bazel +++ b/image/upload/internal/cmd/BUILD.bazel @@ -29,6 +29,5 @@ go_library( "//internal/osimage/nop", "//internal/osimage/uplosi", "@com_github_spf13_cobra//:cobra", - "@org_uber_go_zap//zapcore", ], ) diff --git a/image/upload/internal/cmd/flags.go b/image/upload/internal/cmd/flags.go index 24bbaf455c..26b159b44a 100644 --- a/image/upload/internal/cmd/flags.go +++ b/image/upload/internal/cmd/flags.go @@ -8,20 +8,20 @@ package cmd import ( "errors" + "log/slog" "path/filepath" "strings" "github.com/edgelesssys/constellation/v2/internal/api/versionsapi" "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider" "github.com/spf13/cobra" - "go.uber.org/zap/zapcore" ) type s3Flags struct { region string bucket string distributionID string - logLevel zapcore.Level + logLevel slog.Level } func parseS3Flags(cmd *cobra.Command) (s3Flags, error) { @@ -41,9 +41,9 @@ func parseS3Flags(cmd *cobra.Command) (s3Flags, error) { if err != nil { return s3Flags{}, err } - logLevel := zapcore.InfoLevel + logLevel := slog.LevelInfo if verbose { - logLevel = zapcore.DebugLevel + logLevel = slog.LevelDebug } return s3Flags{ @@ -84,7 +84,7 @@ func parseUploadMeasurementsFlags(cmd *cobra.Command) (measurementsFlags, error) type mergeMeasurementsFlags struct { out string - logLevel zapcore.Level + logLevel slog.Level } func parseMergeMeasurementsFlags(cmd *cobra.Command) (mergeMeasurementsFlags, error) { @@ -96,9 +96,9 @@ func parseMergeMeasurementsFlags(cmd *cobra.Command) (mergeMeasurementsFlags, er if err != nil { return mergeMeasurementsFlags{}, err } - logLevel := zapcore.InfoLevel + logLevel := slog.LevelInfo if verbose { - logLevel = zapcore.DebugLevel + logLevel = slog.LevelDebug } return mergeMeasurementsFlags{ @@ -112,7 +112,7 @@ type envelopeMeasurementsFlags struct { csp cloudprovider.Provider attestationVariant string in, out string - logLevel zapcore.Level + logLevel slog.Level } func parseEnvelopeMeasurementsFlags(cmd *cobra.Command) (envelopeMeasurementsFlags, error) { @@ -148,9 +148,9 @@ func parseEnvelopeMeasurementsFlags(cmd 
*cobra.Command) (envelopeMeasurementsFla if err != nil { return envelopeMeasurementsFlags{}, err } - logLevel := zapcore.InfoLevel + logLevel := slog.LevelInfo if verbose { - logLevel = zapcore.DebugLevel + logLevel = slog.LevelDebug } return envelopeMeasurementsFlags{ @@ -176,7 +176,7 @@ type uplosiFlags struct { bucket string distributionID string - logLevel zapcore.Level + logLevel slog.Level } func parseUplosiFlags(cmd *cobra.Command) (uplosiFlags, error) { @@ -271,9 +271,9 @@ func parseUplosiFlags(cmd *cobra.Command) (uplosiFlags, error) { if err != nil { return uplosiFlags{}, err } - logLevel := zapcore.InfoLevel + logLevel := slog.LevelInfo if verbose { - logLevel = zapcore.DebugLevel + logLevel = slog.LevelDebug } return uplosiFlags{ diff --git a/image/upload/internal/cmd/info.go b/image/upload/internal/cmd/info.go index c837c6a03e..cd629600ef 100644 --- a/image/upload/internal/cmd/info.go +++ b/image/upload/internal/cmd/info.go @@ -49,8 +49,8 @@ func runInfo(cmd *cobra.Command, args []string) error { return err } - log := logger.New(logger.PlainLog, flags.logLevel) - log.Debugf("Parsed flags: %+v", flags) + log := logger.NewTextLogger(flags.logLevel) + log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) info, err := readInfoArgs(args) if err != nil { return err @@ -62,7 +62,7 @@ func runInfo(cmd *cobra.Command, args []string) error { } defer func() { if err := uploadCClose(cmd.Context()); err != nil { - log.Errorf("closing upload client: %v", err) + log.Error(fmt.Sprintf("closing upload client: %v", err)) } }() @@ -70,7 +70,7 @@ func runInfo(cmd *cobra.Command, args []string) error { if err != nil { return fmt.Errorf("uploading image info: %w", err) } - log.Infof("Uploaded image info to %s", url) + log.Info(fmt.Sprintf("Uploaded image info to %s", url)) return nil } diff --git a/image/upload/internal/cmd/measurementsenvelope.go b/image/upload/internal/cmd/measurementsenvelope.go index e7480d1ca4..878dcaf718 100644 --- a/image/upload/internal/cmd/measurementsenvelope.go +++ b/image/upload/internal/cmd/measurementsenvelope.go @@ -53,8 +53,8 @@ func runEnvelopeMeasurements(cmd *cobra.Command, _ []string) error { return err } - log := logger.New(logger.PlainLog, flags.logLevel) - log.Debugf("Parsed flags: %+v", flags) + log := logger.NewTextLogger(flags.logLevel) + log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) f, err := os.Open(flags.in) if err != nil { @@ -97,7 +97,7 @@ func runEnvelopeMeasurements(cmd *cobra.Command, _ []string) error { if err := json.NewEncoder(out).Encode(enveloped); err != nil { return fmt.Errorf("enveloping measurements: writing output file: %w", err) } - log.Infof("Enveloped image measurements") + log.Info("Enveloped image measurements") return nil } diff --git a/image/upload/internal/cmd/measurementsmerge.go b/image/upload/internal/cmd/measurementsmerge.go index 758f54e5d8..53ec2de2ca 100644 --- a/image/upload/internal/cmd/measurementsmerge.go +++ b/image/upload/internal/cmd/measurementsmerge.go @@ -44,8 +44,8 @@ func runMergeMeasurements(cmd *cobra.Command, args []string) error { return err } - log := logger.New(logger.PlainLog, flags.logLevel) - log.Debugf("Parsed flags: %+v", flags) + log := logger.NewTextLogger(flags.logLevel) + log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) mergedMeasurements, err := readMeasurementsArgs(args) if err != nil { @@ -65,7 +65,7 @@ func runMergeMeasurements(cmd *cobra.Command, args []string) error { if err := json.NewEncoder(out).Encode(mergedMeasurements); err != nil { return fmt.Errorf("merging measurements: writing 
output file: %w", err) } - log.Infof("Merged image measurements") + log.Info("Merged image measurements") return nil } diff --git a/image/upload/internal/cmd/measurementsupload.go b/image/upload/internal/cmd/measurementsupload.go index e266fda3f5..850883c630 100644 --- a/image/upload/internal/cmd/measurementsupload.go +++ b/image/upload/internal/cmd/measurementsupload.go @@ -52,8 +52,8 @@ func runMeasurementsUpload(cmd *cobra.Command, _ []string) error { return err } - log := logger.New(logger.PlainLog, flags.logLevel) - log.Debugf("Parsed flags: %+v", flags) + log := logger.NewTextLogger(flags.logLevel) + log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) uploadC, uploadCClose, err := measurementsuploader.New(cmd.Context(), flags.region, flags.bucket, flags.distributionID, log) if err != nil { @@ -61,7 +61,7 @@ func runMeasurementsUpload(cmd *cobra.Command, _ []string) error { } defer func() { if err := uploadCClose(cmd.Context()); err != nil { - log.Errorf("closing upload client: %v", err) + log.Error(fmt.Sprintf("closing upload client: %v", err)) } }() @@ -80,6 +80,6 @@ func runMeasurementsUpload(cmd *cobra.Command, _ []string) error { if err != nil { return fmt.Errorf("uploading image info: %w", err) } - log.Infof("Uploaded image measurements to %s (and signature to %s)", measurementsURL, signatureURL) + log.Info(fmt.Sprintf("Uploaded image measurements to %s (and signature to %s)", measurementsURL, signatureURL)) return nil } diff --git a/image/upload/internal/cmd/uplosi.go b/image/upload/internal/cmd/uplosi.go index 6b5f6c0a54..6569074768 100644 --- a/image/upload/internal/cmd/uplosi.go +++ b/image/upload/internal/cmd/uplosi.go @@ -58,8 +58,8 @@ func runUplosi(cmd *cobra.Command, _ []string) error { if err != nil { return err } - log := logger.New(logger.PlainLog, flags.logLevel) - log.Debugf("Parsed flags: %+v", flags) + log := logger.NewTextLogger(flags.logLevel) + log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) archiveC, archiveCClose, err := archive.New(cmd.Context(), flags.region, flags.bucket, flags.distributionID, log) if err != nil { @@ -67,7 +67,7 @@ func runUplosi(cmd *cobra.Command, _ []string) error { } defer func() { if err := archiveCClose(cmd.Context()); err != nil { - log.Errorf("closing archive client: %v", err) + log.Error(fmt.Sprintf("closing archive client: %v", err)) } }() diff --git a/internal/api/attestationconfigapi/BUILD.bazel b/internal/api/attestationconfigapi/BUILD.bazel index 037f482c94..5db942eb3e 100644 --- a/internal/api/attestationconfigapi/BUILD.bazel +++ b/internal/api/attestationconfigapi/BUILD.bazel @@ -17,7 +17,6 @@ go_library( "//internal/api/fetcher", "//internal/attestation/variant", "//internal/constants", - "//internal/logger", "//internal/sigstore", "//internal/staticupload", "@com_github_aws_aws_sdk_go//aws", diff --git a/internal/api/attestationconfigapi/cli/BUILD.bazel b/internal/api/attestationconfigapi/cli/BUILD.bazel index c235cfd7cd..9825418299 100644 --- a/internal/api/attestationconfigapi/cli/BUILD.bazel +++ b/internal/api/attestationconfigapi/cli/BUILD.bazel @@ -33,7 +33,6 @@ go_library( "@com_github_aws_aws_sdk_go_v2_service_s3//types", "@com_github_spf13_afero//:afero", "@com_github_spf13_cobra//:cobra", - "@org_uber_go_zap//:zap", ], ) diff --git a/internal/api/attestationconfigapi/cli/delete.go b/internal/api/attestationconfigapi/cli/delete.go index faedabd118..d0b0f447f6 100644 --- a/internal/api/attestationconfigapi/cli/delete.go +++ b/internal/api/attestationconfigapi/cli/delete.go @@ -8,6 +8,7 @@ package main import ( "errors" "fmt" 
+ "log/slog" "path" "github.com/edgelesssys/constellation/v2/internal/api/attestationconfigapi" @@ -16,7 +17,6 @@ import ( "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/internal/staticupload" "github.com/spf13/cobra" - "go.uber.org/zap" ) // newDeleteCmd creates the delete command. @@ -46,7 +46,7 @@ func newDeleteCmd() *cobra.Command { } func runDelete(cmd *cobra.Command, args []string) (retErr error) { - log := logger.New(logger.PlainLog, zap.DebugLevel).Named("attestationconfigapi") + log := logger.NewTextLogger(slog.LevelDebug).WithGroup("attestationconfigapi") deleteCfg, err := newDeleteConfig(cmd, ([3]string)(args[:3])) if err != nil { @@ -89,7 +89,7 @@ func runRecursiveDelete(cmd *cobra.Command, args []string) (retErr error) { return fmt.Errorf("creating delete config: %w", err) } - log := logger.New(logger.PlainLog, zap.DebugLevel).Named("attestationconfigapi") + log := logger.NewTextLogger(slog.LevelDebug).WithGroup("attestationconfigapi") client, closeFn, err := staticupload.New(cmd.Context(), staticupload.Config{ Bucket: deleteCfg.bucket, Region: deleteCfg.region, diff --git a/internal/api/attestationconfigapi/cli/upload.go b/internal/api/attestationconfigapi/cli/upload.go index 831f99da7d..54036009ab 100644 --- a/internal/api/attestationconfigapi/cli/upload.go +++ b/internal/api/attestationconfigapi/cli/upload.go @@ -9,6 +9,7 @@ import ( "context" "errors" "fmt" + "log/slog" "os" "time" @@ -21,7 +22,6 @@ import ( "github.com/edgelesssys/constellation/v2/internal/verify" "github.com/spf13/afero" "github.com/spf13/cobra" - "go.uber.org/zap" ) func newUploadCmd() *cobra.Command { @@ -61,7 +61,7 @@ func envCheck(_ *cobra.Command, _ []string) error { func runUpload(cmd *cobra.Command, args []string) (retErr error) { ctx := cmd.Context() - log := logger.New(logger.PlainLog, zap.DebugLevel).Named("attestationconfigapi") + log := logger.NewTextLogger(slog.LevelDebug).WithGroup("attestationconfigapi") uploadCfg, err := newConfig(cmd, ([3]string)(args[:3])) if err != nil { @@ -110,25 +110,25 @@ func uploadReport(ctx context.Context, client *attestationconfigapi.Client, cfg uploadConfig, fs file.Handler, - log *logger.Logger, + log *slog.Logger, ) error { if cfg.kind != snpReport { return fmt.Errorf("kind %s not supported", cfg.kind) } - log.Infof("Reading SNP report from file: %s", cfg.path) + log.Info(fmt.Sprintf("Reading SNP report from file: %s", cfg.path)) var report verify.Report if err := fs.ReadJSON(cfg.path, &report); err != nil { return fmt.Errorf("reading snp report: %w", err) } inputVersion := convertTCBVersionToSNPVersion(report.SNPReport.LaunchTCB) - log.Infof("Input report: %+v", inputVersion) + log.Info(fmt.Sprintf("Input report: %+v", inputVersion)) latestAPIVersionAPI, err := attestationconfigapi.NewFetcherWithCustomCDNAndCosignKey(cfg.url, cfg.cosignPublicKey).FetchSEVSNPVersionLatest(ctx, attestation) if err != nil { if errors.Is(err, attestationconfigapi.ErrNoVersionsFound) { - log.Infof("No versions found in API, but assuming that we are uploading the first version.") + log.Info("No versions found in API, but assuming that we are uploading the first version.") } else { return fmt.Errorf("fetching latest version: %w", err) } @@ -137,7 +137,7 @@ func uploadReport(ctx context.Context, latestAPIVersion := latestAPIVersionAPI.SEVSNPVersion if err := client.UploadSEVSNPVersionLatest(ctx, attestation, inputVersion, latestAPIVersion, cfg.uploadDate, cfg.force); err != nil { if errors.Is(err, 
attestationconfigapi.ErrNoNewerVersion) { - log.Infof("Input version: %+v is not newer than latest API version: %+v", inputVersion, latestAPIVersion) + log.Info(fmt.Sprintf("Input version: %+v is not newer than latest API version: %+v", inputVersion, latestAPIVersion)) return nil } return fmt.Errorf("updating latest version: %w", err) diff --git a/internal/api/attestationconfigapi/client.go b/internal/api/attestationconfigapi/client.go index 9b4575a4c5..583e3bba4d 100644 --- a/internal/api/attestationconfigapi/client.go +++ b/internal/api/attestationconfigapi/client.go @@ -9,11 +9,11 @@ import ( "context" "errors" "fmt" + "log/slog" "time" apiclient "github.com/edgelesssys/constellation/v2/internal/api/client" "github.com/edgelesssys/constellation/v2/internal/attestation/variant" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/internal/sigstore" "github.com/edgelesssys/constellation/v2/internal/staticupload" @@ -32,7 +32,7 @@ type Client struct { } // NewClient returns a new Client. -func NewClient(ctx context.Context, cfg staticupload.Config, cosignPwd, privateKey []byte, dryRun bool, versionWindowSize int, log *logger.Logger) (*Client, apiclient.CloseFunc, error) { +func NewClient(ctx context.Context, cfg staticupload.Config, cosignPwd, privateKey []byte, dryRun bool, versionWindowSize int, log *slog.Logger) (*Client, apiclient.CloseFunc, error) { s3Client, clientClose, err := apiclient.NewClient(ctx, cfg.Region, cfg.Bucket, cfg.DistributionID, dryRun, log) if err != nil { return nil, nil, fmt.Errorf("failed to create s3 storage: %w", err) diff --git a/internal/api/attestationconfigapi/reporter.go b/internal/api/attestationconfigapi/reporter.go index 4cc4bcad68..00656e8816 100644 --- a/internal/api/attestationconfigapi/reporter.go +++ b/internal/api/attestationconfigapi/reporter.go @@ -55,23 +55,23 @@ func (c Client) UploadSEVSNPVersionLatest(ctx context.Context, attestation varia return fmt.Errorf("list reported versions: %w", err) } if len(versionDates) < c.cacheWindowSize { - c.s3Client.Logger.Warnf("Skipping version update, found %d, expected %d reported versions.", len(versionDates), c.cacheWindowSize) + c.s3Client.Logger.Warn(fmt.Sprintf("Skipping version update, found %d, expected %d reported versions.", len(versionDates), c.cacheWindowSize)) return nil } minVersion, minDate, err := c.findMinVersion(ctx, attestation, versionDates) if err != nil { return fmt.Errorf("get minimal version: %w", err) } - c.s3Client.Logger.Infof("Found minimal version: %+v with date: %s", minVersion, minDate) + c.s3Client.Logger.Info(fmt.Sprintf("Found minimal version: %+v with date: %s", minVersion, minDate)) shouldUpdateAPI, err := isInputNewerThanOtherVersion(minVersion, latestAPIVersion) if err != nil { return ErrNoNewerVersion } if !shouldUpdateAPI { - c.s3Client.Logger.Infof("Input version: %+v is not newer than latest API version: %+v", minVersion, latestAPIVersion) + c.s3Client.Logger.Info(fmt.Sprintf("Input version: %+v is not newer than latest API version: %+v", minVersion, latestAPIVersion)) return nil } - c.s3Client.Logger.Infof("Input version: %+v is newer than latest API version: %+v", minVersion, latestAPIVersion) + c.s3Client.Logger.Info(fmt.Sprintf("Input version: %+v is newer than latest API version: %+v", minVersion, latestAPIVersion)) t, err := time.Parse(VersionFormat, minDate) if err != nil { return fmt.Errorf("parsing date: %w", err) @@ -79,7 +79,7 @@ func (c Client) UploadSEVSNPVersionLatest(ctx context.Context, attestation 
varia if err := c.uploadSEVSNPVersion(ctx, attestation, minVersion, t); err != nil { return fmt.Errorf("uploading version: %w", err) } - c.s3Client.Logger.Infof("Successfully uploaded new Azure SEV-SNP version: %+v", minVersion) + c.s3Client.Logger.Info(fmt.Sprintf("Successfully uploaded new Azure SEV-SNP version: %+v", minVersion)) return nil } diff --git a/internal/api/client/BUILD.bazel b/internal/api/client/BUILD.bazel index a77457c5ab..a8fef5dd24 100644 --- a/internal/api/client/BUILD.bazel +++ b/internal/api/client/BUILD.bazel @@ -6,12 +6,10 @@ go_library( importpath = "github.com/edgelesssys/constellation/v2/internal/api/client", visibility = ["//:__subpackages__"], deps = [ - "//internal/logger", "//internal/sigstore", "//internal/staticupload", "@com_github_aws_aws_sdk_go_v2_feature_s3_manager//:manager", "@com_github_aws_aws_sdk_go_v2_service_s3//:s3", "@com_github_aws_aws_sdk_go_v2_service_s3//types", - "@org_uber_go_zap//:zap", ], ) diff --git a/internal/api/client/client.go b/internal/api/client/client.go index 1a26945119..d9ad7ec9e2 100644 --- a/internal/api/client/client.go +++ b/internal/api/client/client.go @@ -33,16 +33,15 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "strings" "time" s3manager "github.com/aws/aws-sdk-go-v2/feature/s3/manager" "github.com/aws/aws-sdk-go-v2/service/s3" s3types "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/internal/sigstore" "github.com/edgelesssys/constellation/v2/internal/staticupload" - "go.uber.org/zap" ) // Client is the a general client for all APIs. @@ -54,13 +53,13 @@ type Client struct { dirtyPaths []string // written paths to be invalidated DryRun bool // no write operations are performed - Logger *logger.Logger + Logger *slog.Logger } // NewReadOnlyClient creates a new read-only client. // This client can be used to fetch objects but cannot write updates. func NewReadOnlyClient(ctx context.Context, region, bucket, distributionID string, - log *logger.Logger, + log *slog.Logger, ) (*Client, CloseFunc, error) { staticUploadClient, staticUploadClientClose, err := staticupload.New(ctx, staticupload.Config{ Region: region, @@ -89,7 +88,7 @@ func NewReadOnlyClient(ctx context.Context, region, bucket, distributionID strin // NewClient creates a new client for the versions API. func NewClient(ctx context.Context, region, bucket, distributionID string, dryRun bool, - log *logger.Logger, + log *slog.Logger, ) (*Client, CloseFunc, error) { staticUploadClient, staticUploadClientClose, err := staticupload.New(ctx, staticupload.Config{ Region: region, @@ -120,7 +119,7 @@ func NewClient(ctx context.Context, region, bucket, distributionID string, dryRu // It invalidates the CDN cache for all uploaded files. func (c *Client) Close(ctx context.Context) error { if c.s3ClientClose == nil { - c.Logger.Debugf("Client has no s3ClientClose") + c.Logger.Debug("Client has no s3ClientClose") return nil } return c.s3ClientClose(ctx) @@ -132,7 +131,7 @@ func (c *Client) DeletePath(ctx context.Context, path string) error { Bucket: &c.bucket, Prefix: &path, } - c.Logger.Debugf("Listing objects in %s", path) + c.Logger.Debug(fmt.Sprintf("Listing objects in %s", path)) objs := []s3types.Object{} out := &s3.ListObjectsV2Output{IsTruncated: ptr(true)} for out.IsTruncated != nil && *out.IsTruncated { @@ -143,10 +142,10 @@ func (c *Client) DeletePath(ctx context.Context, path string) error { } objs = append(objs, out.Contents...) 
} - c.Logger.Debugf("Found %d objects in %s", len(objs), path) + c.Logger.Debug(fmt.Sprintf("Found %d objects in %s", len(objs), path)) if len(objs) == 0 { - c.Logger.Warnf("Path %s is already empty", path) + c.Logger.Warn(fmt.Sprintf("Path %s is already empty", path)) return nil } @@ -156,7 +155,7 @@ func (c *Client) DeletePath(ctx context.Context, path string) error { } if c.DryRun { - c.Logger.Debugf("DryRun: Deleting %d objects with IDs %v", len(objs), objIDs) + c.Logger.Debug(fmt.Sprintf("DryRun: Deleting %d objects with IDs %v", len(objs), objIDs)) return nil } @@ -168,7 +167,7 @@ func (c *Client) DeletePath(ctx context.Context, path string) error { Objects: objIDs, }, } - c.Logger.Debugf("Deleting %d objects in %s", len(objs), path) + c.Logger.Debug(fmt.Sprintf("Deleting %d objects in %s", len(objs), path)) if _, err := c.s3Client.DeleteObjects(ctx, deleteIn); err != nil { return fmt.Errorf("deleting objects in %s: %w", path, err) } @@ -198,7 +197,7 @@ func Fetch[T APIObject](ctx context.Context, c *Client, obj T) (T, error) { Key: ptr(obj.JSONPath()), } - c.Logger.Debugf("Fetching %T from s3: %s", obj, obj.JSONPath()) + c.Logger.Debug(fmt.Sprintf("Fetching %T from s3: %s", obj, obj.JSONPath())) out, err := c.s3Client.GetObject(ctx, in) var noSuchkey *s3types.NoSuchKey if errors.As(err, &noSuchkey) { @@ -232,7 +231,7 @@ func Update(ctx context.Context, c *Client, obj APIObject) error { } if c.DryRun { - c.Logger.With(zap.String("bucket", c.bucket), zap.String("key", obj.JSONPath()), zap.String("body", string(rawJSON))).Debugf("DryRun: s3 put object") + c.Logger.With(slog.String("bucket", c.bucket), slog.String("key", obj.JSONPath()), slog.String("body", string(rawJSON))).Debug("DryRun: s3 put object") return nil } @@ -244,7 +243,7 @@ func Update(ctx context.Context, c *Client, obj APIObject) error { c.dirtyPaths = append(c.dirtyPaths, "/"+obj.JSONPath()) - c.Logger.Debugf("Uploading %T to s3: %v", obj, obj.JSONPath()) + c.Logger.Debug(fmt.Sprintf("Uploading %T to s3: %v", obj, obj.JSONPath())) if _, err := c.Upload(ctx, in); err != nil { return fmt.Errorf("uploading %T: %w", obj, err) } @@ -307,7 +306,7 @@ func Delete(ctx context.Context, c *Client, obj APIObject) error { Key: ptr(obj.JSONPath()), } - c.Logger.Debugf("Deleting %T from s3: %s", obj, obj.JSONPath()) + c.Logger.Debug(fmt.Sprintf("Deleting %T from s3: %s", obj, obj.JSONPath())) if _, err := c.DeleteObject(ctx, in); err != nil { return fmt.Errorf("deleting s3 object at %s: %w", obj.JSONPath(), err) } diff --git a/internal/api/versionsapi/BUILD.bazel b/internal/api/versionsapi/BUILD.bazel index 2f792d6c16..30fb04c213 100644 --- a/internal/api/versionsapi/BUILD.bazel +++ b/internal/api/versionsapi/BUILD.bazel @@ -20,7 +20,6 @@ go_library( "//internal/api/client", "//internal/api/fetcher", "//internal/constants", - "//internal/logger", "@org_golang_x_mod//semver", ], ) diff --git a/internal/api/versionsapi/cli/BUILD.bazel b/internal/api/versionsapi/cli/BUILD.bazel index 76882ae228..411c68f38d 100644 --- a/internal/api/versionsapi/cli/BUILD.bazel +++ b/internal/api/versionsapi/cli/BUILD.bazel @@ -27,7 +27,6 @@ go_library( "@com_google_cloud_go_compute//apiv1", "@com_google_cloud_go_compute//apiv1/computepb", "@org_golang_x_mod//semver", - "@org_uber_go_zap//zapcore", ], ) diff --git a/internal/api/versionsapi/cli/add.go b/internal/api/versionsapi/cli/add.go index 4efd94612c..89c64c2b79 100644 --- a/internal/api/versionsapi/cli/add.go +++ b/internal/api/versionsapi/cli/add.go @@ -10,12 +10,12 @@ import ( "context" "errors" 
"fmt" + "log/slog" apiclient "github.com/edgelesssys/constellation/v2/internal/api/client" "github.com/edgelesssys/constellation/v2/internal/api/versionsapi" "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/spf13/cobra" - "go.uber.org/zap/zapcore" "golang.org/x/mod/semver" ) @@ -52,21 +52,21 @@ func runAdd(cmd *cobra.Command, _ []string) (retErr error) { if err != nil { return err } - log := logger.New(logger.PlainLog, flags.logLevel) - log.Debugf("Parsed flags: %+v", flags) + log := logger.NewTextLogger(flags.logLevel) + log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) - log.Debugf("Validating flags") + log.Debug("Validating flags") if err := flags.validate(log); err != nil { return err } - log.Debugf("Creating version struct") + log.Debug("Creating version struct") ver, err := versionsapi.NewVersion(flags.ref, flags.stream, flags.version, flags.kind) if err != nil { return fmt.Errorf("creating version: %w", err) } - log.Debugf("Creating versions API client") + log.Debug("Creating versions API client") client, clientClose, err := versionsapi.NewClient(cmd.Context(), flags.region, flags.bucket, flags.distributionID, flags.dryRun, log) if err != nil { return fmt.Errorf("creating client: %w", err) @@ -78,7 +78,7 @@ func runAdd(cmd *cobra.Command, _ []string) (retErr error) { } }() - log.Infof("Adding version") + log.Info("Adding version") if err := ensureVersion(cmd.Context(), client, flags.kind, ver, versionsapi.GranularityMajor, log); err != nil { return err } @@ -93,14 +93,14 @@ func runAdd(cmd *cobra.Command, _ []string) (retErr error) { } } - log.Infof("List major->minor URL: %s", ver.ListURL(versionsapi.GranularityMajor)) - log.Infof("List minor->patch URL: %s", ver.ListURL(versionsapi.GranularityMinor)) + log.Info(fmt.Sprintf("List major->minor URL: %s", ver.ListURL(versionsapi.GranularityMajor))) + log.Info(fmt.Sprintf("List minor->patch URL: %s", ver.ListURL(versionsapi.GranularityMinor))) return nil } func ensureVersion(ctx context.Context, client *versionsapi.Client, kind versionsapi.VersionKind, ver versionsapi.Version, gran versionsapi.Granularity, - log *logger.Logger, + log *slog.Logger, ) error { verListReq := versionsapi.List{ Ref: ver.Ref(), @@ -112,34 +112,34 @@ func ensureVersion(ctx context.Context, client *versionsapi.Client, kind version verList, err := client.FetchVersionList(ctx, verListReq) var notFoundErr *apiclient.NotFoundError if errors.As(err, ¬FoundErr) { - log.Infof("Version list for %s versions under %q does not exist. Creating new list", gran.String(), ver.Major()) + log.Info(fmt.Sprintf("Version list for %s versions under %q does not exist. 
Creating new list", gran.String(), ver.Major())) verList = verListReq } else if err != nil { return fmt.Errorf("failed to list minor versions: %w", err) } - log.Debugf("%s version list: %v", gran.String(), verList) + log.Debug(fmt.Sprintf("%s version list: %v", gran.String(), verList)) insertGran := gran + 1 insertVersion := ver.WithGranularity(insertGran) if verList.Contains(insertVersion) { - log.Infof("Version %q already exists in list %v", insertVersion, verList.Versions) + log.Info(fmt.Sprintf("Version %q already exists in list %v", insertVersion, verList.Versions)) return nil } - log.Infof("Inserting %s version %q into list", insertGran.String(), insertVersion) + log.Info(fmt.Sprintf("Inserting %s version %q into list", insertGran.String(), insertVersion)) verList.Versions = append(verList.Versions, insertVersion) - log.Debugf("New %s version list: %v", gran.String(), verList) + log.Debug(fmt.Sprintf("New %s version list: %v", gran.String(), verList)) if err := client.UpdateVersionList(ctx, verList); err != nil { return fmt.Errorf("failed to add %s version: %w", gran.String(), err) } - log.Infof("Added %q to list", insertVersion) + log.Info(fmt.Sprintf("Added %q to list", insertVersion)) return nil } -func updateLatest(ctx context.Context, client *versionsapi.Client, kind versionsapi.VersionKind, ver versionsapi.Version, log *logger.Logger) error { +func updateLatest(ctx context.Context, client *versionsapi.Client, kind versionsapi.VersionKind, ver versionsapi.Version, log *slog.Logger) error { latest := versionsapi.Latest{ Ref: ver.Ref(), Stream: ver.Stream(), @@ -148,17 +148,17 @@ func updateLatest(ctx context.Context, client *versionsapi.Client, kind versions latest, err := client.FetchVersionLatest(ctx, latest) var notFoundErr *apiclient.NotFoundError if errors.As(err, ¬FoundErr) { - log.Debugf("Latest version for ref %q and stream %q not found", ver.Ref(), ver.Stream()) + log.Debug(fmt.Sprintf("Latest version for ref %q and stream %q not found", ver.Ref(), ver.Stream())) } else if err != nil { return fmt.Errorf("fetching latest version: %w", err) } if latest.Version == ver.Version() { - log.Infof("Version %q is already latest version", ver) + log.Info(fmt.Sprintf("Version %q is already latest version", ver.Version())) return nil } - log.Infof("Setting %q as latest version", ver) + log.Info(fmt.Sprintf("Setting %q as latest version", ver.Version())) latest = versionsapi.Latest{ Ref: ver.Ref(), Stream: ver.Stream(), @@ -183,10 +183,10 @@ type addFlags struct { bucket string distributionID string kind versionsapi.VersionKind - logLevel zapcore.Level + logLevel slog.Level } -func (f *addFlags) validate(log *logger.Logger) error { +func (f *addFlags) validate(log *slog.Logger) error { if !semver.IsValid(f.version) { return fmt.Errorf("version %q is not a valid semantic version", f.version) } @@ -203,10 +203,10 @@ func (f *addFlags) validate(log *logger.Logger) error { } if f.release { - log.Debugf("Setting ref to %q, as release flag is set", versionsapi.ReleaseRef) + log.Debug(fmt.Sprintf("Setting ref to %q, as release flag is set", versionsapi.ReleaseRef)) f.ref = versionsapi.ReleaseRef } else { - log.Debugf("Setting latest to true, as release flag is not set") + log.Debug("Setting latest to true, as release flag is not set") f.latest = true // always set latest for non-release versions } @@ -256,9 +256,9 @@ func parseAddFlags(cmd *cobra.Command) (addFlags, error) { if err != nil { return addFlags{}, err } - logLevel := zapcore.InfoLevel + logLevel := slog.LevelInfo if verbose { - 
logLevel = zapcore.DebugLevel + logLevel = slog.LevelDebug } region, err := cmd.Flags().GetString("region") if err != nil { diff --git a/internal/api/versionsapi/cli/latest.go b/internal/api/versionsapi/cli/latest.go index 0a77406970..797cfc64d5 100644 --- a/internal/api/versionsapi/cli/latest.go +++ b/internal/api/versionsapi/cli/latest.go @@ -10,11 +10,11 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "github.com/edgelesssys/constellation/v2/internal/api/versionsapi" "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/spf13/cobra" - "go.uber.org/zap/zapcore" ) func newLatestCmd() *cobra.Command { @@ -38,15 +38,15 @@ func runLatest(cmd *cobra.Command, _ []string) (retErr error) { if err != nil { return err } - log := logger.New(logger.PlainLog, flags.logLevel) - log.Debugf("Parsed flags: %+v", flags) + log := logger.NewTextLogger(flags.logLevel) + log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) - log.Debugf("Validating flags") + log.Debug("Validating flags") if err := flags.validate(); err != nil { return err } - log.Debugf("Creating versions API client") + log.Debug("Creating versions API client") client, clientClose, err := versionsapi.NewReadOnlyClient(cmd.Context(), flags.region, flags.bucket, flags.distributionID, log) if err != nil { return fmt.Errorf("creating client: %w", err) @@ -58,7 +58,7 @@ func runLatest(cmd *cobra.Command, _ []string) (retErr error) { } }() - log.Debugf("Requesting latest version") + log.Debug("Requesting latest version") latest := versionsapi.Latest{ Ref: flags.ref, Stream: flags.stream, @@ -89,7 +89,7 @@ type latestFlags struct { region string bucket string distributionID string - logLevel zapcore.Level + logLevel slog.Level } func (l *latestFlags) validate() error { @@ -133,9 +133,9 @@ func parseLatestFlags(cmd *cobra.Command) (latestFlags, error) { if err != nil { return latestFlags{}, err } - logLevel := zapcore.InfoLevel + logLevel := slog.LevelInfo if verbose { - logLevel = zapcore.DebugLevel + logLevel = slog.LevelDebug } return latestFlags{ diff --git a/internal/api/versionsapi/cli/list.go b/internal/api/versionsapi/cli/list.go index f158d6d3c8..717ba6c77e 100644 --- a/internal/api/versionsapi/cli/list.go +++ b/internal/api/versionsapi/cli/list.go @@ -11,9 +11,9 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "github.com/spf13/cobra" - "go.uber.org/zap/zapcore" "golang.org/x/mod/semver" apiclient "github.com/edgelesssys/constellation/v2/internal/api/client" @@ -43,15 +43,15 @@ func runList(cmd *cobra.Command, _ []string) (retErr error) { if err != nil { return err } - log := logger.New(logger.PlainLog, flags.logLevel) - log.Debugf("Parsed flags: %+v", flags) + log := logger.NewTextLogger(flags.logLevel) + log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) - log.Debugf("Validating flags") + log.Debug("Validating flags") if err := flags.validate(); err != nil { return err } - log.Debugf("Creating versions API client") + log.Debug("Creating versions API client") client, clientClose, err := versionsapi.NewReadOnlyClient(cmd.Context(), flags.region, flags.bucket, flags.distributionID, log) if err != nil { return fmt.Errorf("creating client: %w", err) @@ -67,29 +67,29 @@ func runList(cmd *cobra.Command, _ []string) (retErr error) { if flags.minorVersion != "" { minorVersions = []string{flags.minorVersion} } else { - log.Debugf("Getting minor versions") + log.Debug("Getting minor versions") minorVersions, err = listMinorVersions(cmd.Context(), client, flags.ref, flags.stream) var errNotFound *apiclient.NotFoundError if 
err != nil && errors.As(err, &errNotFound) { - log.Infof("No minor versions found for ref %q and stream %q.", flags.ref, flags.stream) + log.Info(fmt.Sprintf("No minor versions found for ref %q and stream %q.", flags.ref, flags.stream)) return nil } else if err != nil { return err } } - log.Debugf("Getting patch versions") + log.Debug("Getting patch versions") patchVersions, err := listPatchVersions(cmd.Context(), client, flags.ref, flags.stream, minorVersions) var errNotFound *apiclient.NotFoundError if err != nil && errors.As(err, &errNotFound) { - log.Infof("No patch versions found for ref %q, stream %q and minor versions %v.", flags.ref, flags.stream, minorVersions) + log.Info(fmt.Sprintf("No patch versions found for ref %q, stream %q and minor versions %v.", flags.ref, flags.stream, minorVersions)) return nil } else if err != nil { return err } if flags.json { - log.Debugf("Printing versions as JSON") + log.Debug("Printing versions as JSON") var vers []string for _, v := range patchVersions { vers = append(vers, v.Version()) @@ -102,7 +102,7 @@ func runList(cmd *cobra.Command, _ []string) (retErr error) { return nil } - log.Debugf("Printing versions") + log.Debug("Printing versions") for _, v := range patchVersions { fmt.Println(v.ShortPath()) } @@ -158,7 +158,7 @@ type listFlags struct { bucket string distributionID string json bool - logLevel zapcore.Level + logLevel slog.Level } func (l *listFlags) validate() error { @@ -211,9 +211,9 @@ func parseListFlags(cmd *cobra.Command) (listFlags, error) { if err != nil { return listFlags{}, err } - logLevel := zapcore.InfoLevel + logLevel := slog.LevelInfo if verbose { - logLevel = zapcore.DebugLevel + logLevel = slog.LevelDebug } return listFlags{ diff --git a/internal/api/versionsapi/cli/rm.go b/internal/api/versionsapi/cli/rm.go index f41d7510c3..51802b5fbe 100644 --- a/internal/api/versionsapi/cli/rm.go +++ b/internal/api/versionsapi/cli/rm.go @@ -12,6 +12,7 @@ import ( "fmt" "io" "log" + "log/slog" "regexp" "strings" "time" @@ -29,7 +30,6 @@ import ( "github.com/edgelesssys/constellation/v2/internal/logger" gaxv2 "github.com/googleapis/gax-go/v2" "github.com/spf13/cobra" - "go.uber.org/zap/zapcore" ) func newRemoveCmd() *cobra.Command { @@ -74,33 +74,33 @@ func runRemove(cmd *cobra.Command, _ []string) (retErr error) { if err != nil { return err } - log := logger.New(logger.PlainLog, flags.logLevel) - log.Debugf("Parsed flags: %+v", flags) + log := logger.NewTextLogger(flags.logLevel) + log.Debug(fmt.Sprintf("Parsed flags: %+v", flags)) - log.Debugf("Validating flags") + log.Debug("Validating flags") if err := flags.validate(); err != nil { return err } - log.Debugf("Creating GCP client") + log.Debug("Creating GCP client") gcpClient, err := newGCPClient(cmd.Context(), flags.gcpProject) if err != nil { return fmt.Errorf("creating GCP client: %w", err) } - log.Debugf("Creating AWS client") + log.Debug("Creating AWS client") awsClient, err := newAWSClient() if err != nil { return fmt.Errorf("creating AWS client: %w", err) } - log.Debugf("Creating Azure client") + log.Debug("Creating Azure client") azClient, err := newAzureClient(flags.azSubscription, flags.azLocation, flags.azResourceGroup) if err != nil { return fmt.Errorf("creating Azure client: %w", err) } - log.Debugf("Creating versions API client") + log.Debug("Creating versions API client") verclient, verclientClose, err := versionsapi.NewClient(cmd.Context(), flags.region, flags.bucket, flags.distributionID, flags.dryrun, log) if err != nil { return fmt.Errorf("creating client: 
%w", err) @@ -120,14 +120,14 @@ func runRemove(cmd *cobra.Command, _ []string) (retErr error) { } if flags.all { - log.Infof("Deleting ref %s", flags.ref) + log.Info(fmt.Sprintf("Deleting ref %s", flags.ref)) if err := deleteRef(cmd.Context(), imageClients, flags.ref, flags.dryrun, log); err != nil { return fmt.Errorf("deleting ref: %w", err) } return nil } - log.Infof("Deleting single version %s", flags.ver.ShortPath()) + log.Info(fmt.Sprintf("Deleting single version %s", flags.ver.ShortPath())) if err := deleteSingleVersion(cmd.Context(), imageClients, flags.ver, flags.dryrun, log); err != nil { return fmt.Errorf("deleting single version: %w", err) } @@ -135,15 +135,15 @@ func runRemove(cmd *cobra.Command, _ []string) (retErr error) { return nil } -func deleteSingleVersion(ctx context.Context, clients rmImageClients, ver versionsapi.Version, dryrun bool, log *logger.Logger) error { +func deleteSingleVersion(ctx context.Context, clients rmImageClients, ver versionsapi.Version, dryrun bool, log *slog.Logger) error { var retErr error - log.Debugf("Deleting images for %s", ver.Version) + log.Debug(fmt.Sprintf("Deleting images for %s", ver.Version())) if err := deleteImage(ctx, clients, ver, dryrun, log); err != nil { retErr = errors.Join(retErr, fmt.Errorf("deleting images: %w", err)) } - log.Debugf("Deleting version %s from versions API", ver.Version) + log.Debug(fmt.Sprintf("Deleting version %s from versions API", ver.Version())) if err := clients.version.DeleteVersion(ctx, ver); err != nil { retErr = errors.Join(retErr, fmt.Errorf("deleting version from versions API: %w", err)) } @@ -151,15 +151,15 @@ func deleteSingleVersion(ctx context.Context, clients rmImageClients, ver versio return retErr } -func deleteRef(ctx context.Context, clients rmImageClients, ref string, dryrun bool, log *logger.Logger) error { +func deleteRef(ctx context.Context, clients rmImageClients, ref string, dryrun bool, log *slog.Logger) error { var vers []versionsapi.Version for _, stream := range []string{"nightly", "console", "debug"} { - log.Infof("Listing versions of stream %s", stream) + log.Info(fmt.Sprintf("Listing versions of stream %s", stream)) minorVersions, err := listMinorVersions(ctx, clients.version, ref, stream) var notFoundErr *apiclient.NotFoundError if errors.As(err, ¬FoundErr) { - log.Debugf("No minor versions found for stream %s", stream) + log.Debug(fmt.Sprintf("No minor versions found for stream %s", stream)) continue } else if err != nil { return fmt.Errorf("listing minor versions for stream %s: %w", stream, err) @@ -167,7 +167,7 @@ func deleteRef(ctx context.Context, clients rmImageClients, ref string, dryrun b patchVersions, err := listPatchVersions(ctx, clients.version, ref, stream, minorVersions) if errors.As(err, ¬FoundErr) { - log.Debugf("No patch versions found for stream %s", stream) + log.Debug(fmt.Sprintf("No patch versions found for stream %s", stream)) continue } else if err != nil { return fmt.Errorf("listing patch versions for stream %s: %w", stream, err) @@ -175,7 +175,7 @@ func deleteRef(ctx context.Context, clients rmImageClients, ref string, dryrun b vers = append(vers, patchVersions...) 
} - log.Infof("Found %d versions to delete", len(vers)) + log.Info(fmt.Sprintf("Found %d versions to delete", len(vers))) var retErr error @@ -185,7 +185,7 @@ func deleteRef(ctx context.Context, clients rmImageClients, ref string, dryrun b } } - log.Infof("Deleting ref %s from versions API", ref) + log.Info(fmt.Sprintf("Deleting ref %s from versions API", ref)) if err := clients.version.DeleteRef(ctx, ref); err != nil { retErr = errors.Join(retErr, fmt.Errorf("deleting ref from versions API: %w", err)) } @@ -193,7 +193,7 @@ func deleteRef(ctx context.Context, clients rmImageClients, ref string, dryrun b return retErr } -func deleteImage(ctx context.Context, clients rmImageClients, ver versionsapi.Version, dryrun bool, log *logger.Logger) error { +func deleteImage(ctx context.Context, clients rmImageClients, ver versionsapi.Version, dryrun bool, log *slog.Logger) error { var retErr error imageInfo := versionsapi.ImageInfo{ @@ -204,8 +204,8 @@ func deleteImage(ctx context.Context, clients rmImageClients, ver versionsapi.Ve imageInfo, err := clients.version.FetchImageInfo(ctx, imageInfo) var notFound *apiclient.NotFoundError if errors.As(err, ¬Found) { - log.Warnf("Image info for %s not found", ver.Version) - log.Warnf("Skipping image deletion") + log.Warn(fmt.Sprintf("Image info for %s not found", ver.Version())) + log.Warn("Skipping image deletion") return nil } else if err != nil { return fmt.Errorf("fetching image info: %w", err) @@ -214,17 +214,17 @@ func deleteImage(ctx context.Context, clients rmImageClients, ver versionsapi.Ve for _, entry := range imageInfo.List { switch entry.CSP { case "aws": - log.Infof("Deleting AWS images from %s", imageInfo.JSONPath()) + log.Info(fmt.Sprintf("Deleting AWS images from %s", imageInfo.JSONPath())) if err := clients.aws.deleteImage(ctx, entry.Reference, entry.Region, dryrun, log); err != nil { retErr = errors.Join(retErr, fmt.Errorf("deleting AWS image %s: %w", entry.Reference, err)) } case "gcp": - log.Infof("Deleting GCP images from %s", imageInfo.JSONPath()) + log.Info(fmt.Sprintf("Deleting GCP images from %s", imageInfo.JSONPath())) if err := clients.gcp.deleteImage(ctx, entry.Reference, dryrun, log); err != nil { retErr = errors.Join(retErr, fmt.Errorf("deleting GCP image %s: %w", entry.Reference, err)) } case "azure": - log.Infof("Deleting Azure images from %s", imageInfo.JSONPath()) + log.Info(fmt.Sprintf("Deleting Azure images from %s", imageInfo.JSONPath())) if err := clients.az.deleteImage(ctx, entry.Reference, dryrun, log); err != nil { retErr = errors.Join(retErr, fmt.Errorf("deleting Azure image %s: %w", entry.Reference, err)) } @@ -259,7 +259,7 @@ type rmFlags struct { azSubscription string azLocation string azResourceGroup string - logLevel zapcore.Level + logLevel slog.Level ver versionsapi.Version } @@ -358,9 +358,9 @@ func parseRmFlags(cmd *cobra.Command) (*rmFlags, error) { if err != nil { return nil, err } - logLevel := zapcore.InfoLevel + logLevel := slog.LevelInfo if verbose { - logLevel = zapcore.DebugLevel + logLevel = slog.LevelDebug } return &rmFlags{ @@ -400,17 +400,17 @@ type ec2API interface { ) (*ec2.DeleteSnapshotOutput, error) } -func (a *awsClient) deleteImage(ctx context.Context, ami string, region string, dryrun bool, log *logger.Logger) error { +func (a *awsClient) deleteImage(ctx context.Context, ami string, region string, dryrun bool, log *slog.Logger) error { cfg, err := awsconfig.LoadDefaultConfig(ctx, awsconfig.WithRegion(region)) if err != nil { return err } a.ec2 = ec2.NewFromConfig(cfg) - 
log.Debugf("Deleting resources in AWS region %s", region) + log.Debug(fmt.Sprintf("Deleting resources in AWS region %s", region)) snapshotID, err := a.getSnapshotID(ctx, ami, log) if err != nil { - log.Warnf("Failed to get AWS snapshot ID for image %s: %v", ami, err) + log.Warn(fmt.Sprintf("Failed to get AWS snapshot ID for image %s: %v", ami, err)) } if err := a.deregisterImage(ctx, ami, dryrun, log); err != nil { @@ -426,8 +426,8 @@ func (a *awsClient) deleteImage(ctx context.Context, ami string, region string, return nil } -func (a *awsClient) deregisterImage(ctx context.Context, ami string, dryrun bool, log *logger.Logger) error { - log.Debugf("Deregistering image %s", ami) +func (a *awsClient) deregisterImage(ctx context.Context, ami string, dryrun bool, log *slog.Logger) error { + log.Debug(fmt.Sprintf("Deregistering image %s", ami)) deregisterReq := ec2.DeregisterImageInput{ ImageId: &ami, @@ -438,15 +438,15 @@ func (a *awsClient) deregisterImage(ctx context.Context, ami string, dryrun bool if errors.As(err, &apiErr) && (apiErr.ErrorCode() == "InvalidAMIID.NotFound" || apiErr.ErrorCode() == "InvalidAMIID.Unavailable") { - log.Warnf("AWS image %s not found", ami) + log.Warn(fmt.Sprintf("AWS image %s not found", ami)) return nil } return err } -func (a *awsClient) getSnapshotID(ctx context.Context, ami string, log *logger.Logger) (string, error) { - log.Debugf("Describing image %s", ami) +func (a *awsClient) getSnapshotID(ctx context.Context, ami string, log *slog.Logger) (string, error) { + log.Debug(fmt.Sprintf("Describing image %s", ami)) req := ec2.DescribeImagesInput{ ImageIds: []string{ami}, @@ -481,8 +481,8 @@ func (a *awsClient) getSnapshotID(ctx context.Context, ami string, log *logger.L return snapshotID, nil } -func (a *awsClient) deleteSnapshot(ctx context.Context, snapshotID string, dryrun bool, log *logger.Logger) error { - log.Debugf("Deleting AWS snapshot %s", snapshotID) +func (a *awsClient) deleteSnapshot(ctx context.Context, snapshotID string, dryrun bool, log *slog.Logger) error { + log.Debug(fmt.Sprintf("Deleting AWS snapshot %s", snapshotID)) req := ec2.DeleteSnapshotInput{ SnapshotId: &snapshotID, @@ -493,7 +493,7 @@ func (a *awsClient) deleteSnapshot(ctx context.Context, snapshotID string, dryru if errors.As(err, &apiErr) && (apiErr.ErrorCode() == "InvalidSnapshot.NotFound" || apiErr.ErrorCode() == "InvalidSnapshot.Unavailable") { - log.Warnf("AWS snapshot %s not found", snapshotID) + log.Warn(fmt.Sprintf("AWS snapshot %s not found", snapshotID)) return nil } @@ -523,7 +523,7 @@ type gcpComputeAPI interface { io.Closer } -func (g *gcpClient) deleteImage(ctx context.Context, imageURI string, dryrun bool, log *logger.Logger) error { +func (g *gcpClient) deleteImage(ctx context.Context, imageURI string, dryrun bool, log *slog.Logger) error { // Extract image name from image URI // Expected input into function: "projects/constellation-images/global/images/v2-6-0-stable" // Required for computepb.DeleteImageRequest: "v2-6-0-stable" @@ -536,20 +536,20 @@ func (g *gcpClient) deleteImage(ctx context.Context, imageURI string, dryrun boo } if dryrun { - log.Debugf("DryRun: delete image request: %v", req) + log.Debug(fmt.Sprintf("DryRun: delete image request: %v", req)) return nil } - log.Debugf("Deleting image %s", image) + log.Debug(fmt.Sprintf("Deleting image %s", image)) op, err := g.compute.Delete(ctx, req) if err != nil && strings.Contains(err.Error(), "404") { - log.Warnf("GCP image %s not found", image) + log.Warn(fmt.Sprintf("GCP image %s not found", image)) 
return nil } else if err != nil { return fmt.Errorf("deleting image %s: %w", image, err) } - log.Debugf("Waiting for operation to finish") + log.Debug("Waiting for operation to finish") if err := op.Wait(ctx); err != nil { return fmt.Errorf("waiting for operation: %w", err) } @@ -624,30 +624,30 @@ var ( azCommunityImageRegex = regexp.MustCompile("^/CommunityGalleries/([[:alnum:]-]+)/Images/([[:alnum:]._-]+)/Versions/([[:alnum:]._-]+)$") ) -func (a *azureClient) deleteImage(ctx context.Context, image string, dryrun bool, log *logger.Logger) error { +func (a *azureClient) deleteImage(ctx context.Context, image string, dryrun bool, log *slog.Logger) error { azImage, err := a.parseImage(ctx, image, log) if err != nil { return err } if dryrun { - log.Debugf("DryRun: delete image %v", azImage) + log.Debug(fmt.Sprintf("DryRun: delete image %v", azImage)) return nil } - log.Debugf("Deleting image %q, version %q", azImage.imageDefinition, azImage.version) + log.Debug(fmt.Sprintf("Deleting image %q, version %q", azImage.imageDefinition, azImage.version)) poller, err := a.imageVersions.BeginDelete(ctx, azImage.resourceGroup, azImage.gallery, azImage.imageDefinition, azImage.version, nil) if err != nil { return fmt.Errorf("begin delete image version: %w", err) } - log.Debugf("Waiting for operation to finish") + log.Debug("Waiting for operation to finish") if _, err := poller.PollUntilDone(ctx, nil); err != nil { return fmt.Errorf("waiting for operation: %w", err) } - log.Debugf("Checking if image definition %q still has versions left", azImage.imageDefinition) + log.Debug(fmt.Sprintf("Checking if image definition %q still has versions left", azImage.imageDefinition)) pager := a.imageVersions.NewListByGalleryImagePager(azImage.resourceGroup, azImage.gallery, azImage.imageDefinition, nil) for pager.More() { @@ -656,20 +656,20 @@ func (a *azureClient) deleteImage(ctx context.Context, image string, dryrun bool return fmt.Errorf("listing image versions of image definition %s: %w", azImage.imageDefinition, err) } if len(nextResult.Value) != 0 { - log.Debugf("Image definition %q still has versions left, won't be deleted", azImage.imageDefinition) + log.Debug(fmt.Sprintf("Image definition %q still has versions left, won't be deleted", azImage.imageDefinition)) return nil } } time.Sleep(15 * time.Second) // Azure needs time understand that there is no version left... 
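// The patch wraps printf-style messages in fmt.Sprintf so the rendered log
// output stays byte-for-byte identical after the zap-to-slog switch. slog can
// also carry the same data as structured key-value attributes; a minimal
// sketch (the attribute key "definition" is illustrative, not part of this patch):
//
//	log.Debug("deleting image definition", slog.String("definition", azImage.imageDefinition))
//
// The Sprintf form is kept here deliberately, to leave existing log messages unchanged.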
- log.Debugf("Deleting image definition %s", azImage.imageDefinition) + log.Debug(fmt.Sprintf("Deleting image definition %s", azImage.imageDefinition)) op, err := a.image.BeginDelete(ctx, azImage.resourceGroup, azImage.gallery, azImage.imageDefinition, nil) if err != nil { return fmt.Errorf("deleting image definition %s: %w", azImage.imageDefinition, err) } - log.Debugf("Waiting for operation to finish") + log.Debug("Waiting for operation to finish") if _, err := op.PollUntilDone(ctx, nil); err != nil { return fmt.Errorf("waiting for operation: %w", err) } @@ -684,12 +684,12 @@ type azImage struct { version string } -func (a *azureClient) parseImage(ctx context.Context, image string, log *logger.Logger) (azImage, error) { +func (a *azureClient) parseImage(ctx context.Context, image string, log *slog.Logger) (azImage, error) { if m := azImageRegex.FindStringSubmatch(image); len(m) == 5 { - log.Debugf( + log.Debug(fmt.Sprintf( "Image matches local image format, resource group: %s, gallery: %s, image definition: %s, version: %s", m[1], m[2], m[3], m[4], - ) + )) return azImage{ resourceGroup: m[1], gallery: m[2], @@ -707,10 +707,10 @@ func (a *azureClient) parseImage(ctx context.Context, image string, log *logger. imageDefinition := m[2] version := m[3] - log.Debugf( + log.Debug(fmt.Sprintf( "Image matches community image format, gallery public name: %s, image definition: %s, version: %s", galleryPublicName, imageDefinition, version, - ) + )) var galleryName string pager := a.galleries.NewListPager(nil) @@ -721,24 +721,24 @@ func (a *azureClient) parseImage(ctx context.Context, image string, log *logger. } for _, v := range nextResult.Value { if v.Name == nil { - log.Debugf("Skipping gallery with nil name") + log.Debug("Skipping gallery with nil name") continue } if v.Properties.SharingProfile == nil { - log.Debugf("Skipping gallery %s with nil sharing profile", *v.Name) + log.Debug(fmt.Sprintf("Skipping gallery %s with nil sharing profile", *v.Name)) continue } if v.Properties.SharingProfile.CommunityGalleryInfo == nil { - log.Debugf("Skipping gallery %s with nil community gallery info", *v.Name) + log.Debug(fmt.Sprintf("Skipping gallery %s with nil community gallery info", *v.Name)) continue } if v.Properties.SharingProfile.CommunityGalleryInfo.PublicNames == nil { - log.Debugf("Skipping gallery %s with nil public names", *v.Name) + log.Debug(fmt.Sprintf("Skipping gallery %s with nil public names", *v.Name)) continue } for _, publicName := range v.Properties.SharingProfile.CommunityGalleryInfo.PublicNames { if publicName == nil { - log.Debugf("Skipping nil public name") + log.Debug("Skipping nil public name") continue } if *publicName == galleryPublicName { diff --git a/internal/api/versionsapi/client.go b/internal/api/versionsapi/client.go index 67900ec0e3..c03e8a7b6b 100644 --- a/internal/api/versionsapi/client.go +++ b/internal/api/versionsapi/client.go @@ -10,13 +10,13 @@ import ( "context" "errors" "fmt" + "log/slog" "path" "golang.org/x/mod/semver" apiclient "github.com/edgelesssys/constellation/v2/internal/api/client" "github.com/edgelesssys/constellation/v2/internal/constants" - "github.com/edgelesssys/constellation/v2/internal/logger" ) // Client is a client for the versions API. @@ -27,7 +27,7 @@ type Client struct { // NewClient creates a new client for the versions API. 
func NewClient(ctx context.Context, region, bucket, distributionID string, dryRun bool, - log *logger.Logger, + log *slog.Logger, ) (*Client, CloseFunc, error) { genericClient, genericClientClose, err := apiclient.NewClient(ctx, region, bucket, distributionID, dryRun, log) versionsClient := &Client{ @@ -43,7 +43,7 @@ func NewClient(ctx context.Context, region, bucket, distributionID string, dryRu // NewReadOnlyClient creates a new read-only client. // This client can be used to fetch objects but cannot write updates. func NewReadOnlyClient(ctx context.Context, region, bucket, distributionID string, - log *logger.Logger, + log *slog.Logger, ) (*Client, CloseFunc, error) { genericClient, genericClientClose, err := apiclient.NewReadOnlyClient(ctx, region, bucket, distributionID, log) if err != nil { @@ -131,18 +131,18 @@ func (c *Client) DeleteRef(ctx context.Context, ref string) error { func (c *Client) DeleteVersion(ctx context.Context, ver Version) error { var retErr error - c.Client.Logger.Debugf("Deleting version %s from minor version list", ver.version) + c.Client.Logger.Debug(fmt.Sprintf("Deleting version %s from minor version list", ver.version)) possibleNewLatest, err := c.deleteVersionFromMinorVersionList(ctx, ver) if err != nil { retErr = errors.Join(retErr, fmt.Errorf("removing from minor version list: %w", err)) } - c.Client.Logger.Debugf("Checking latest version for %s", ver.version) + c.Client.Logger.Debug(fmt.Sprintf("Checking latest version for %s", ver.version)) if err := c.deleteVersionFromLatest(ctx, ver, possibleNewLatest); err != nil { retErr = errors.Join(retErr, fmt.Errorf("updating latest version: %w", err)) } - c.Client.Logger.Debugf("Deleting artifact path %s for %s", ver.ArtifactPath(APIV1), ver.version) + c.Client.Logger.Debug(fmt.Sprintf("Deleting artifact path %s for %s", ver.ArtifactPath(APIV1), ver.version)) if err := c.Client.DeletePath(ctx, ver.ArtifactPath(APIV1)); err != nil { retErr = errors.Join(retErr, fmt.Errorf("deleting artifact path: %w", err)) } @@ -159,20 +159,20 @@ func (c *Client) deleteVersionFromMinorVersionList(ctx context.Context, ver Vers Base: ver.WithGranularity(GranularityMinor), Kind: VersionKindImage, } - c.Client.Logger.Debugf("Fetching minor version list for version %s", ver.version) + c.Client.Logger.Debug(fmt.Sprintf("Fetching minor version list for version %s", ver.version)) minorList, err := c.FetchVersionList(ctx, minorList) var notFoundErr *apiclient.NotFoundError if errors.As(err, ¬FoundErr) { - c.Client.Logger.Warnf("Minor version list for version %s not found", ver.version) - c.Client.Logger.Warnf("Skipping update of minor version list") + c.Client.Logger.Warn(fmt.Sprintf("Minor version list for version %s not found", ver.version)) + c.Client.Logger.Warn("Skipping update of minor version list") return nil, nil } else if err != nil { return nil, fmt.Errorf("fetching minor version list for version %s: %w", ver.version, err) } if !minorList.Contains(ver.version) { - c.Client.Logger.Warnf("Version %s is not in minor version list %s", ver.version, minorList.JSONPath()) - c.Client.Logger.Warnf("Skipping update of minor version list") + c.Client.Logger.Warn(fmt.Sprintf("Version %s is not in minor version list %s", ver.version, minorList.JSONPath())) + c.Client.Logger.Warn("Skipping update of minor version list") return nil, nil } @@ -192,20 +192,20 @@ func (c *Client) deleteVersionFromMinorVersionList(ctx context.Context, ver Vers Kind: VersionKindImage, Version: minorList.Versions[len(minorList.Versions)-1], } - 
c.Client.Logger.Debugf("Possible latest version replacement %q", latest.Version) + c.Client.Logger.Debug(fmt.Sprintf("Possible latest version replacement %q", latest.Version)) } if c.Client.DryRun { - c.Client.Logger.Debugf("DryRun: Updating minor version list %s to %v", minorList.JSONPath(), minorList) + c.Client.Logger.Debug(fmt.Sprintf("DryRun: Updating minor version list %s to %v", minorList.JSONPath(), minorList)) return latest, nil } - c.Client.Logger.Debugf("Updating minor version list %s", minorList.JSONPath()) + c.Client.Logger.Debug(fmt.Sprintf("Updating minor version list %s", minorList.JSONPath())) if err := c.UpdateVersionList(ctx, minorList); err != nil { return latest, fmt.Errorf("updating minor version list %s: %w", minorList.JSONPath(), err) } - c.Client.Logger.Debugf("Removed version %s from minor version list %s", ver.version, minorList.JSONPath()) + c.Client.Logger.Debug(fmt.Sprintf("Removed version %s from minor version list %s", ver.version, minorList.JSONPath())) return latest, nil } @@ -216,33 +216,33 @@ func (c *Client) deleteVersionFromLatest(ctx context.Context, ver Version, possi Stream: ver.stream, Kind: VersionKindImage, } - c.Client.Logger.Debugf("Fetching latest version from %s", latest.JSONPath()) + c.Client.Logger.Debug(fmt.Sprintf("Fetching latest version from %s", latest.JSONPath())) latest, err := c.FetchVersionLatest(ctx, latest) var notFoundErr *apiclient.NotFoundError if errors.As(err, ¬FoundErr) { - c.Client.Logger.Warnf("Latest version for %s not found", latest.JSONPath()) + c.Client.Logger.Warn(fmt.Sprintf("Latest version for %s not found", latest.JSONPath())) return nil } else if err != nil { return fmt.Errorf("fetching latest version: %w", err) } if latest.Version != ver.version { - c.Client.Logger.Debugf("Latest version is %s, not the deleted version %s", latest.Version, ver.version) + c.Client.Logger.Debug(fmt.Sprintf("Latest version is %s, not the deleted version %s", latest.Version, ver.version)) return nil } if possibleNewLatest == nil { - c.Client.Logger.Errorf("Latest version is %s, but no new latest version was found", latest.Version) - c.Client.Logger.Errorf("A manual update of latest at %s might be needed", latest.JSONPath()) + c.Client.Logger.Error(fmt.Sprintf("Latest version is %s, but no new latest version was found", latest.Version)) + c.Client.Logger.Error(fmt.Sprintf("A manual update of latest at %s might be needed", latest.JSONPath())) return fmt.Errorf("latest version is %s, but no new latest version was found", latest.Version) } if c.Client.DryRun { - c.Client.Logger.Debugf("Would update latest version from %s to %s", latest.Version, possibleNewLatest.Version) + c.Client.Logger.Debug(fmt.Sprintf("Would update latest version from %s to %s", latest.Version, possibleNewLatest.Version)) return nil } - c.Client.Logger.Infof("Updating latest version from %s to %s", latest.Version, possibleNewLatest.Version) + c.Client.Logger.Info(fmt.Sprintf("Updating latest version from %s to %s", latest.Version, possibleNewLatest.Version)) if err := c.UpdateVersionLatest(ctx, *possibleNewLatest); err != nil { return fmt.Errorf("updating latest version: %w", err) } diff --git a/internal/attestation/attestation.go b/internal/attestation/attestation.go index f09988dce8..d5e458012f 100644 --- a/internal/attestation/attestation.go +++ b/internal/attestation/attestation.go @@ -45,18 +45,18 @@ const ( // Logger is a logger used to print warnings and infos during attestation validation. 
type Logger interface { - Infof(format string, args ...any) - Warnf(format string, args ...any) + Info(msg string, args ...any) + Warn(msg string, args ...any) } // NOPLogger is a no-op implementation of [Logger]. type NOPLogger struct{} -// Infof is a no-op. -func (NOPLogger) Infof(string, ...interface{}) {} +// Info is a no-op. +func (NOPLogger) Info(string, ...interface{}) {} -// Warnf is a no-op. -func (NOPLogger) Warnf(string, ...interface{}) {} +// Warn is a no-op. +func (NOPLogger) Warn(string, ...interface{}) {} // DeriveClusterID derives the cluster ID from a salt and secret value. func DeriveClusterID(secret, salt []byte) ([]byte, error) { diff --git a/internal/attestation/azure/snp/validator.go b/internal/attestation/azure/snp/validator.go index 0d971a4187..a4b58e4d40 100644 --- a/internal/attestation/azure/snp/validator.go +++ b/internal/attestation/azure/snp/validator.go @@ -212,18 +212,18 @@ func (v *Validator) checkIDKeyDigest(ctx context.Context, report *spb.Attestatio // the MAA if necessary. switch v.config.FirmwareSignerConfig.EnforcementPolicy { case idkeydigest.MAAFallback: - v.log.Infof( + v.log.Info(fmt.Sprintf( "Configured idkeydigests %x don't contain reported idkeydigest %x, falling back to MAA validation", v.config.FirmwareSignerConfig.AcceptedKeyDigests, report.Report.IdKeyDigest, - ) + )) return v.maa.validateToken(ctx, v.config.FirmwareSignerConfig.MAAURL, maaToken, extraData) case idkeydigest.WarnOnly: - v.log.Warnf( + v.log.Warn(fmt.Sprintf( "Configured idkeydigests %x don't contain reported idkeydigest %x", v.config.FirmwareSignerConfig.AcceptedKeyDigests, report.Report.IdKeyDigest, - ) + )) default: return fmt.Errorf( "configured idkeydigests %x don't contain reported idkeydigest %x", @@ -240,10 +240,10 @@ func (v *Validator) checkIDKeyDigest(ctx context.Context, report *spb.Attestatio type nopAttestationLogger struct{} -// Infof is a no-op. -func (nopAttestationLogger) Infof(string, ...interface{}) {} +// Info is a no-op. +func (nopAttestationLogger) Info(string, ...interface{}) {} -// Warnf is a no-op. -func (nopAttestationLogger) Warnf(string, ...interface{}) {} +// Warn is a no-op. +func (nopAttestationLogger) Warn(string, ...interface{}) {} type maaValidator interface { validateToken(ctx context.Context, maaURL string, token string, extraData []byte) error diff --git a/internal/attestation/snp/snp.go b/internal/attestation/snp/snp.go index e7b285ba61..95cba55bfc 100644 --- a/internal/attestation/snp/snp.go +++ b/internal/attestation/snp/snp.go @@ -57,7 +57,7 @@ func (a *InstanceInfo) addReportSigner(att *spb.Attestation, report *spb.Report, // If the VCEK certificate is present, parse it and format it. reportSigner, err := a.ParseReportSigner() if err != nil { - logger.Warnf("Error parsing report signer: %v", err) + logger.Warn(fmt.Sprintf("Error parsing report signer: %v", err)) } signerInfo, err := abi.ParseSignerInfo(report.GetSignerInfo()) @@ -77,7 +77,7 @@ func (a *InstanceInfo) addReportSigner(att *spb.Attestation, report *spb.Report, // If no VCEK is present, fetch it from AMD.
if reportSigner == nil { - logger.Infof("VCEK certificate not present, falling back to retrieving it from AMD KDS") + logger.Info("VCEK certificate not present, falling back to retrieving it from AMD KDS") vcekURL := kds.VCEKCertURL(productName, report.GetChipId(), kds.TCBVersion(report.GetReportedTcb())) vcekData, err = getter.Get(vcekURL) if err != nil { @@ -123,43 +123,43 @@ func (a *InstanceInfo) AttestationWithCerts(getter trust.HTTPSGetter, // If the certificate chain from THIM is present, parse it and format it. ask, ark, err := a.ParseCertChain() if err != nil { - logger.Warnf("Error parsing certificate chain: %v", err) + logger.Warn(fmt.Sprintf("Error parsing certificate chain: %v", err)) } if ask != nil { - logger.Infof("Using ASK certificate from Azure THIM") + logger.Info("Using ASK certificate from Azure THIM") att.CertificateChain.AskCert = ask.Raw } if ark != nil { - logger.Infof("Using ARK certificate from Azure THIM") + logger.Info("Using ARK certificate from Azure THIM") att.CertificateChain.ArkCert = ark.Raw } // If a cached ASK or an ARK from the Constellation config is present, use it. if att.CertificateChain.AskCert == nil && fallbackCerts.ask != nil { - logger.Infof("Using cached ASK certificate") + logger.Info("Using cached ASK certificate") att.CertificateChain.AskCert = fallbackCerts.ask.Raw } if att.CertificateChain.ArkCert == nil && fallbackCerts.ark != nil { - logger.Infof("Using ARK certificate from %s", constants.ConfigFilename) + logger.Info(fmt.Sprintf("Using ARK certificate from %s", constants.ConfigFilename)) att.CertificateChain.ArkCert = fallbackCerts.ark.Raw } // Otherwise, retrieve it from AMD KDS. if att.CertificateChain.AskCert == nil || att.CertificateChain.ArkCert == nil { - logger.Infof( + logger.Info(fmt.Sprintf( "Certificate chain not fully present (ARK present: %t, ASK present: %t), falling back to retrieving it from AMD KDS", (att.CertificateChain.ArkCert != nil), (att.CertificateChain.AskCert != nil), - ) + )) kdsCertChain, err := trust.GetProductChain(productName, signingInfo, getter) if err != nil { return nil, fmt.Errorf("retrieving certificate chain from AMD KDS: %w", err) } if att.CertificateChain.AskCert == nil && kdsCertChain.Ask != nil { - logger.Infof("Using ASK certificate from AMD KDS") + logger.Info("Using ASK certificate from AMD KDS") att.CertificateChain.AskCert = kdsCertChain.Ask.Raw } if att.CertificateChain.ArkCert == nil && kdsCertChain.Ask != nil { - logger.Infof("Using ARK certificate from AMD KDS") + logger.Info("Using ARK certificate from AMD KDS") att.CertificateChain.ArkCert = kdsCertChain.Ark.Raw } } diff --git a/internal/attestation/tdx/issuer.go b/internal/attestation/tdx/issuer.go index 4bee70065b..0dadb4b3ce 100644 --- a/internal/attestation/tdx/issuer.go +++ b/internal/attestation/tdx/issuer.go @@ -37,10 +37,10 @@ func NewIssuer(log attestation.Logger) *Issuer { // Issue issues a TDX attestation document. 
func (i *Issuer) Issue(_ context.Context, userData []byte, nonce []byte) (attDoc []byte, err error) { - i.log.Infof("Issuing attestation statement") + i.log.Info("Issuing attestation statement") defer func() { if err != nil { - i.log.Warnf("Failed to issue attestation document: %s", err) + i.log.Warn(fmt.Sprintf("Failed to issue attestation document: %s", err)) } }() diff --git a/internal/attestation/tdx/validator.go b/internal/attestation/tdx/validator.go index 0a18f1c9dc..dcf92d7420 100644 --- a/internal/attestation/tdx/validator.go +++ b/internal/attestation/tdx/validator.go @@ -49,10 +49,10 @@ func NewValidator(cfg *config.QEMUTDX, log attestation.Logger) *Validator { // Validate validates the given attestation document using TDX attestation. func (v *Validator) Validate(ctx context.Context, attDocRaw []byte, nonce []byte) (userData []byte, err error) { - v.log.Infof("Validating attestation document") + v.log.Info("Validating attestation document") defer func() { if err != nil { - v.log.Warnf("Failed to validate attestation document: %s", err) + v.log.Warn(fmt.Sprintf("Failed to validate attestation document: %s", err)) } }() @@ -83,7 +83,7 @@ func (v *Validator) Validate(ctx context.Context, attDocRaw []byte, nonce []byte // Verify the quote against the expected measurements. warnings, errs := v.expected.Compare(tdMeasure) for _, warning := range warnings { - v.log.Warnf(warning) + v.log.Warn(warning) } if len(errs) > 0 { return nil, fmt.Errorf("measurement validation failed:\n%w", errors.Join(errs...)) diff --git a/internal/attestation/vtpm/attestation.go b/internal/attestation/vtpm/attestation.go index a8a5e9b0ff..77c396b9a4 100644 --- a/internal/attestation/vtpm/attestation.go +++ b/internal/attestation/vtpm/attestation.go @@ -103,10 +103,10 @@ func NewIssuer( // Issue generates an attestation document using a TPM. func (i *Issuer) Issue(ctx context.Context, userData []byte, nonce []byte) (res []byte, err error) { - i.log.Infof("Issuing attestation statement") + i.log.Info("Issuing attestation statement") defer func() { if err != nil { - i.log.Warnf("Failed to issue attestation statement: %s", err) + i.log.Warn(fmt.Sprintf("Failed to issue attestation statement: %s", err)) } }() @@ -147,7 +147,7 @@ func (i *Issuer) Issue(ctx context.Context, userData []byte, nonce []byte) (res return nil, fmt.Errorf("marshaling attestation document: %w", err) } - i.log.Infof("Successfully issued attestation statement") + i.log.Info("Successfully issued attestation statement") return rawAttDoc, nil } @@ -177,10 +177,10 @@ func NewValidator(expected measurements.M, getTrustedKey GetTPMTrustedAttestatio // Validate a TPM based attestation. 
func (v *Validator) Validate(ctx context.Context, attDocRaw []byte, nonce []byte) (userData []byte, err error) { - v.log.Infof("Validating attestation document") + v.log.Info("Validating attestation document") defer func() { if err != nil { - v.log.Warnf("Failed to validate attestation document: %s", err) + v.log.Warn(fmt.Sprintf("Failed to validate attestation document: %s", err)) } }() @@ -233,13 +233,13 @@ func (v *Validator) Validate(ctx context.Context, attDocRaw []byte, nonce []byte } warnings, errs := v.expected.Compare(attDoc.Attestation.Quotes[quoteIdx].Pcrs.Pcrs) for _, warning := range warnings { - v.log.Warnf(warning) + v.log.Warn(warning) } if len(errs) > 0 { return nil, fmt.Errorf("measurement validation failed:\n%w", errors.Join(errs...)) } - v.log.Infof("Successfully validated attestation document") + v.log.Info("Successfully validated attestation document") return attDoc.UserData, nil } diff --git a/internal/attestation/vtpm/attestation_test.go b/internal/attestation/vtpm/attestation_test.go index 9eeeef3b83..a1bb237c50 100644 --- a/internal/attestation/vtpm/attestation_test.go +++ b/internal/attestation/vtpm/attestation_test.go @@ -481,10 +481,10 @@ type testAttestationLogger struct { warnings []string } -func (w *testAttestationLogger) Infof(format string, args ...any) { +func (w *testAttestationLogger) Info(format string, args ...any) { w.infos = append(w.infos, fmt.Sprintf(format, args...)) } -func (w *testAttestationLogger) Warnf(format string, args ...any) { +func (w *testAttestationLogger) Warn(format string, args ...any) { w.warnings = append(w.warnings, fmt.Sprintf(format, args...)) } diff --git a/internal/cloud/azure/BUILD.bazel b/internal/cloud/azure/BUILD.bazel index efbca7f597..ff8fbea926 100644 --- a/internal/cloud/azure/BUILD.bazel +++ b/internal/cloud/azure/BUILD.bazel @@ -15,7 +15,6 @@ go_library( "//internal/cloud/azureshared", "//internal/cloud/metadata", "//internal/constants", - "//internal/logger", "//internal/role", "@com_github_azure_azure_sdk_for_go_sdk_azcore//runtime", "@com_github_azure_azure_sdk_for_go_sdk_azidentity//:azidentity", @@ -23,7 +22,6 @@ go_library( "@com_github_azure_azure_sdk_for_go_sdk_resourcemanager_network_armnetwork_v5//:armnetwork", "@io_k8s_kubernetes//pkg/util/iptables", "@io_k8s_utils//exec", - "@org_uber_go_zap//:zap", ], ) diff --git a/internal/cloud/azure/azure.go b/internal/cloud/azure/azure.go index 8d8735e099..52a4684713 100644 --- a/internal/cloud/azure/azure.go +++ b/internal/cloud/azure/azure.go @@ -19,6 +19,7 @@ import ( "context" "errors" "fmt" + "log/slog" "path" "strconv" @@ -29,9 +30,7 @@ import ( "github.com/edgelesssys/constellation/v2/internal/cloud/azureshared" "github.com/edgelesssys/constellation/v2/internal/cloud/metadata" "github.com/edgelesssys/constellation/v2/internal/constants" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/internal/role" - "go.uber.org/zap" "k8s.io/kubernetes/pkg/util/iptables" "k8s.io/utils/exec" ) @@ -455,7 +454,7 @@ func (c *Cloud) getLoadBalancerDNSName(ctx context.Context) (string, error) { // // OpenShift also uses the same mechanism to redirect traffic to the API server: // https://github.com/openshift/machine-config-operator/blob/e453bd20bac0e48afa74e9a27665abaf454d93cd/templates/master/00-master/azure/files/opt-libexec-openshift-azure-routes-sh.yaml -func (c *Cloud) PrepareControlPlaneNode(ctx context.Context, log *logger.Logger) error { +func (c *Cloud) PrepareControlPlaneNode(ctx context.Context, log *slog.Logger) 
error { selfMetadata, err := c.Self(ctx) if err != nil { return fmt.Errorf("failed to get self metadata: %w", err) @@ -463,7 +462,7 @@ func (c *Cloud) PrepareControlPlaneNode(ctx context.Context, log *logger.Logger) // skipping iptables setup for worker nodes if selfMetadata.Role != role.ControlPlane { - log.Infof("not a control plane node, skipping iptables setup") + log.Info("not a control plane node, skipping iptables setup") return nil } @@ -471,11 +470,11 @@ func (c *Cloud) PrepareControlPlaneNode(ctx context.Context, log *logger.Logger) // for public LB architectures loadbalancerIP, err := c.getLoadBalancerPrivateIP(ctx) if err != nil { - log.With(zap.Error(err)).Warnf("skipping iptables setup, failed to get load balancer private IP") + log.With(slog.Any("error", err)).Warn("skipping iptables setup, failed to get load balancer private IP") return nil } - log.Infof("Setting up iptables for control plane node with load balancer IP %s", loadbalancerIP) + log.Info(fmt.Sprintf("Setting up iptables for control plane node with load balancer IP %s", loadbalancerIP)) iptablesExec := iptables.New(exec.New(), iptables.ProtocolIPv4) if err != nil { diff --git a/internal/constellation/apply.go b/internal/constellation/apply.go index 611b1557f1..bbd61cf8c2 100644 --- a/internal/constellation/apply.go +++ b/internal/constellation/apply.go @@ -51,7 +51,7 @@ type licenseChecker interface { } type debugLog interface { - Debugf(format string, args ...any) + Debug(msg string, args ...any) } // NewApplier creates a new Applier. @@ -87,7 +87,7 @@ func (a *Applier) SetKubeConfig(kubeConfig []byte) error { // CheckLicense checks the given Constellation license with the license server // and returns the allowed quota for the license. func (a *Applier) CheckLicense(ctx context.Context, csp cloudprovider.Provider, initRequest bool, licenseID string) (int, error) { - a.log.Debugf("Contacting license server for license '%s'", licenseID) + a.log.Debug(fmt.Sprintf("Contacting license server for license '%s'", licenseID)) var action license.Action if initRequest { @@ -103,14 +103,14 @@ func (a *Applier) CheckLicense(ctx context.Context, csp cloudprovider.Provider, if err != nil { return 0, fmt.Errorf("checking license: %w", err) } - a.log.Debugf("Got response from license server for license '%s'", licenseID) + a.log.Debug(fmt.Sprintf("Got response from license server for license '%s'", licenseID)) return quota, nil } // GenerateMasterSecret generates a new master secret. func (a *Applier) GenerateMasterSecret() (uri.MasterSecret, error) { - a.log.Debugf("Generating master secret") + a.log.Debug("Generating master secret") key, err := crypto.GenerateRandomBytes(crypto.MasterSecretLengthDefault) if err != nil { return uri.MasterSecret{}, err @@ -123,17 +123,17 @@ func (a *Applier) GenerateMasterSecret() (uri.MasterSecret, error) { Key: key, Salt: salt, } - a.log.Debugf("Generated master secret key and salt values") + a.log.Debug("Generated master secret key and salt values") return secret, nil } // GenerateMeasurementSalt generates a new measurement salt. 
func (a *Applier) GenerateMeasurementSalt() ([]byte, error) { - a.log.Debugf("Generating measurement salt") + a.log.Debug("Generating measurement salt") measurementSalt, err := crypto.GenerateRandomBytes(crypto.RNGLengthDefault) if err != nil { return nil, fmt.Errorf("generating measurement salt: %w", err) } - a.log.Debugf("Generated measurement salt") + a.log.Debug("Generated measurement salt") return measurementSalt, nil } diff --git a/internal/constellation/applyinit.go b/internal/constellation/applyinit.go index cbec0cc2c5..f02c9e8ccf 100644 --- a/internal/constellation/applyinit.go +++ b/internal/constellation/applyinit.go @@ -85,21 +85,21 @@ func (a *Applier) Init( // Create a wrapper function that allows logging any returned error from the retrier before checking if it's the expected retriable one. serviceIsUnavailable := func(err error) bool { isServiceUnavailable := grpcRetry.ServiceIsUnavailable(err) - a.log.Debugf("Encountered error (retriable: %t): %s", isServiceUnavailable, err) + a.log.Debug(fmt.Sprintf("Encountered error (retriable: %t): %s", isServiceUnavailable, err)) return isServiceUnavailable } // Perform the RPC - a.log.Debugf("Making initialization call, doer is %+v", doer) + a.log.Debug(fmt.Sprintf("Making initialization call, doer is %+v", doer)) a.spinner.Start("Connecting ", false) retrier := retry.NewIntervalRetrier(doer, 30*time.Second, serviceIsUnavailable) if err := retrier.Do(ctx); err != nil { return InitOutput{}, fmt.Errorf("doing init call: %w", err) } a.spinner.Stop() - a.log.Debugf("Initialization request finished") + a.log.Debug("Initialization request finished") - a.log.Debugf("Rewriting cluster server address in kubeconfig to %s", state.Infrastructure.ClusterEndpoint) + a.log.Debug(fmt.Sprintf("Rewriting cluster server address in kubeconfig to %s", state.Infrastructure.ClusterEndpoint)) kubeconfig, err := clientcmd.Load(doer.resp.Kubeconfig) if err != nil { return InitOutput{}, fmt.Errorf("loading kubeconfig: %w", err) @@ -175,7 +175,7 @@ func (d *initDoer) Do(ctx context.Context) error { conn, err := d.dialer.Dial(ctx, d.endpoint) if err != nil { - d.log.Debugf("Dialing init server failed: %s. Retrying...", err) + d.log.Debug(fmt.Sprintf("Dialing init server failed: %s. 
Retrying...", err)) return fmt.Errorf("dialing init server: %w", err) } defer conn.Close() @@ -188,7 +188,7 @@ func (d *initDoer) Do(ctx context.Context) error { d.handleGRPCStateChanges(grpcStateLogCtx, &wg, conn) protoClient := initproto.NewAPIClient(conn) - d.log.Debugf("Created protoClient") + d.log.Debug("Created protoClient") resp, err := protoClient.Init(ctx, d.req) if err != nil { return &NonRetriableInitError{ @@ -200,7 +200,7 @@ func (d *initDoer) Do(ctx context.Context) error { res, err := resp.Recv() // get first response, either success or failure if err != nil { if e := d.getLogs(resp); e != nil { - d.log.Debugf("Failed to collect logs: %s", e) + d.log.Debug(fmt.Sprintf("Failed to collect logs: %s", e)) return &NonRetriableInitError{ LogCollectionErr: e, Err: err, @@ -214,7 +214,7 @@ func (d *initDoer) Do(ctx context.Context) error { d.resp = res.GetInitSuccess() case *initproto.InitResponse_InitFailure: if e := d.getLogs(resp); e != nil { - d.log.Debugf("Failed to get logs from cluster: %s", e) + d.log.Debug(fmt.Sprintf("Failed to get logs from cluster: %s", e)) return &NonRetriableInitError{ LogCollectionErr: e, Err: errors.New(res.GetInitFailure().GetError()), @@ -222,10 +222,10 @@ func (d *initDoer) Do(ctx context.Context) error { } return &NonRetriableInitError{Err: errors.New(res.GetInitFailure().GetError())} case nil: - d.log.Debugf("Cluster returned nil response type") + d.log.Debug("Cluster returned nil response type") err = errors.New("empty response from cluster") if e := d.getLogs(resp); e != nil { - d.log.Debugf("Failed to collect logs: %s", e) + d.log.Debug(fmt.Sprintf("Failed to collect logs: %s", e)) return &NonRetriableInitError{ LogCollectionErr: e, Err: err, @@ -233,10 +233,10 @@ func (d *initDoer) Do(ctx context.Context) error { } return &NonRetriableInitError{Err: err} default: - d.log.Debugf("Cluster returned unknown response type") + d.log.Debug("Cluster returned unknown response type") err = errors.New("unknown response from cluster") if e := d.getLogs(resp); e != nil { - d.log.Debugf("Failed to collect logs: %s", e) + d.log.Debug(fmt.Sprintf("Failed to collect logs: %s", e)) return &NonRetriableInitError{ LogCollectionErr: e, Err: err, @@ -249,7 +249,7 @@ func (d *initDoer) Do(ctx context.Context) error { // getLogs retrieves the cluster logs from the bootstrapper and saves them in the initDoer. 
func (d *initDoer) getLogs(resp initproto.API_InitClient) error { - d.log.Debugf("Attempting to collect cluster logs") + d.log.Debug("Attempting to collect cluster logs") for { res, err := resp.Recv() if err == io.EOF { @@ -277,7 +277,7 @@ func (d *initDoer) getLogs(resp initproto.API_InitClient) error { } } - d.log.Debugf("Received cluster logs") + d.log.Debug("Received cluster logs") return nil } diff --git a/internal/constellation/helm/action.go b/internal/constellation/helm/action.go index 5d73347e08..6d3f5a78b5 100644 --- a/internal/constellation/helm/action.go +++ b/internal/constellation/helm/action.go @@ -37,7 +37,7 @@ type applyAction interface { func newActionConfig(kubeConfig []byte, logger debugLog) (*action.Configuration, error) { actionConfig := &action.Configuration{} if err := actionConfig.Init(&clientGetter{kubeConfig: kubeConfig}, constants.HelmNamespace, - "secret", logger.Debugf); err != nil { + "secret", logger.Debug); err != nil { return nil, err } return actionConfig, nil diff --git a/internal/constellation/helm/actionfactory.go b/internal/constellation/helm/actionfactory.go index 4237b77310..73336f3eb3 100644 --- a/internal/constellation/helm/actionfactory.go +++ b/internal/constellation/helm/actionfactory.go @@ -90,15 +90,15 @@ func (a actionFactory) appendNewAction( ) } - a.log.Debugf("release %s not found, adding to new releases...", release.releaseName) + a.log.Debug(fmt.Sprintf("release %s not found, adding to new releases...", release.releaseName)) *actions = append(*actions, a.newInstall(release, timeout)) return nil } if err != nil { return fmt.Errorf("getting version for %s: %w", release.releaseName, err) } - a.log.Debugf("Current %s version: %s", release.releaseName, currentVersion) - a.log.Debugf("New %s version: %s", release.releaseName, newVersion) + a.log.Debug(fmt.Sprintf("Current %s version: %s", release.releaseName, currentVersion)) + a.log.Debug(fmt.Sprintf("New %s version: %s", release.releaseName, newVersion)) if !force { // For charts we package ourselves, the version is equal to the CLI version (charts are embedded in the binary). @@ -132,7 +132,7 @@ func (a actionFactory) appendNewAction( release.releaseName == certManagerInfo.releaseName { return ErrConfirmationMissing } - a.log.Debugf("Upgrading %s from %s to %s", release.releaseName, currentVersion, newVersion) + a.log.Debug(fmt.Sprintf("Upgrading %s from %s to %s", release.releaseName, currentVersion, newVersion)) *actions = append(*actions, a.newUpgrade(release, timeout)) return nil } @@ -162,7 +162,7 @@ func (a actionFactory) updateCRDs(ctx context.Context, chart *chart.Chart) error for _, dep := range chart.Dependencies() { for _, crdFile := range dep.Files { if strings.HasPrefix(crdFile.Name, "crds/") { - a.log.Debugf("Updating crd: %s", crdFile.Name) + a.log.Debug(fmt.Sprintf("Updating crd: %s", crdFile.Name)) err := a.kubeClient.ApplyCRD(ctx, crdFile.Data) if err != nil { return err diff --git a/internal/constellation/helm/helm.go b/internal/constellation/helm/helm.go index 990ec2b2b2..ab04382149 100644 --- a/internal/constellation/helm/helm.go +++ b/internal/constellation/helm/helm.go @@ -53,7 +53,7 @@ const ( ) type debugLog interface { - Debugf(format string, args ...any) + Debug(msg string, args ...any) } // Client is a Helm client to apply charts. 
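The debugLog interface above shows the consumer-side contract this patch narrows across packages: slog's Debug takes a message plus optional key-value pairs instead of a printf format string, and a *slog.Logger satisfies the new interface directly. A minimal, self-contained sketch of the two call styles (doWork and its arguments are illustrative, not part of the patch):

	package main

	import (
		"fmt"
		"log/slog"
	)

	type debugLog interface {
		Debug(msg string, args ...any)
	}

	func doWork(log debugLog, name string) {
		// printf-style text preserved via fmt.Sprintf, the convention this patch follows:
		log.Debug(fmt.Sprintf("processing release %s", name))
		// equivalent structured form, leaving rendering to the handler:
		log.Debug("processing release", "name", name)
	}

	func main() {
		doWork(slog.Default(), "cilium")
	}

Wrapping with fmt.Sprintf keeps the rendered messages identical to the previous zap output; the structured form is the more idiomatic slog style and can be adopted incrementally.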
@@ -102,7 +102,7 @@ func (h Client) PrepareApply( return nil, false, fmt.Errorf("loading Helm releases: %w", err) } - h.log.Debugf("Loaded Helm releases") + h.log.Debug("Loaded Helm releases") actions, includesUpgrades, err := h.factory.GetActions( releases, flags.MicroserviceVersion, flags.Force, flags.AllowDestructive, flags.ApplyTimeout, ) @@ -114,7 +114,7 @@ func (h Client) loadReleases( stateFile *state.State, flags Options, serviceAccURI string, openStackCfg *config.OpenStackConfig, ) ([]release, error) { helmLoader := newLoader(csp, attestationVariant, k8sVersion, stateFile, h.cliVersion) - h.log.Debugf("Created new Helm loader") + h.log.Debug("Created new Helm loader") return helmLoader.loadReleases(flags.Conformance, flags.DeployCSIDriver, flags.HelmWaitMode, secret, serviceAccURI, openStackCfg) } @@ -133,7 +133,7 @@ type ChartApplyExecutor struct { // Apply applies the charts in order. func (c ChartApplyExecutor) Apply(ctx context.Context) error { for _, action := range c.actions { - c.log.Debugf("Applying %q", action.ReleaseName()) + c.log.Debug(fmt.Sprintf("Applying %q", action.ReleaseName())) if err := action.Apply(ctx); err != nil { return fmt.Errorf("applying %s: %w", action.ReleaseName(), err) } diff --git a/internal/constellation/helm/retryaction.go b/internal/constellation/helm/retryaction.go index ca61944b09..32e54b7463 100644 --- a/internal/constellation/helm/retryaction.go +++ b/internal/constellation/helm/retryaction.go @@ -49,7 +49,7 @@ func retryApply(ctx context.Context, action retrieableApplier, retryInterval tim return fmt.Errorf("helm install: %w", err) } retryLoopFinishDuration := time.Since(retryLoopStartTime) - log.Debugf("Helm chart %q installation finished after %s", action.ReleaseName(), retryLoopFinishDuration) + log.Debug(fmt.Sprintf("Helm chart %q installation finished after %s", action.ReleaseName(), retryLoopFinishDuration)) return nil } @@ -61,9 +61,9 @@ type applyDoer struct { // Do tries to apply the action. func (i applyDoer) Do(ctx context.Context) error { - i.log.Debugf("Trying to apply Helm chart %s", i.applier.ReleaseName()) + i.log.Debug(fmt.Sprintf("Trying to apply Helm chart %s", i.applier.ReleaseName())) if err := i.applier.apply(ctx); err != nil { - i.log.Debugf("Helm chart installation %s failed: %v", i.applier.ReleaseName(), err) + i.log.Debug(fmt.Sprintf("Helm chart installation %s failed: %v", i.applier.ReleaseName(), err)) return err } diff --git a/internal/constellation/kubecmd/backup.go b/internal/constellation/kubecmd/backup.go index 2a396da8bb..c7e32d5be8 100644 --- a/internal/constellation/kubecmd/backup.go +++ b/internal/constellation/kubecmd/backup.go @@ -26,7 +26,7 @@ type crdLister interface { // BackupCRDs backs up all CRDs to the upgrade workspace. 
func (k *KubeCmd) BackupCRDs(ctx context.Context, fileHandler file.Handler, upgradeDir string) ([]apiextensionsv1.CustomResourceDefinition, error) { - k.log.Debugf("Starting CRD backup") + k.log.Debug("Starting CRD backup") crds, err := k.kubectl.ListCRDs(ctx) if err != nil { return nil, fmt.Errorf("getting CRDs: %w", err) @@ -39,7 +39,7 @@ func (k *KubeCmd) BackupCRDs(ctx context.Context, fileHandler file.Handler, upgr for i := range crds { path := filepath.Join(crdBackupFolder, crds[i].Name+".yaml") - k.log.Debugf("Creating CRD backup: %s", path) + k.log.Debug(fmt.Sprintf("Creating CRD backup: %s", path)) // We have to manually set kind/apiversion because of a long-standing limitation of the API: // https://github.com/kubernetes/kubernetes/issues/3030#issuecomment-67543738 @@ -56,15 +56,15 @@ func (k *KubeCmd) BackupCRDs(ctx context.Context, fileHandler file.Handler, upgr return nil, err } } - k.log.Debugf("CRD backup complete") + k.log.Debug("CRD backup complete") return crds, nil } // BackupCRs backs up all CRs to the upgrade workspace. func (k *KubeCmd) BackupCRs(ctx context.Context, fileHandler file.Handler, crds []apiextensionsv1.CustomResourceDefinition, upgradeDir string) error { - k.log.Debugf("Starting CR backup") + k.log.Debug("Starting CR backup") for _, crd := range crds { - k.log.Debugf("Creating backup for resource type: %s", crd.Name) + k.log.Debug(fmt.Sprintf("Creating backup for resource type: %s", crd.Name)) // Iterate over all versions of the CRD // TODO(daniel-weisse): Consider iterating over crd.Status.StoredVersions instead @@ -72,7 +72,7 @@ func (k *KubeCmd) BackupCRs(ctx context.Context, fileHandler file.Handler, crds // a version that is not installed in the cluster. // With the StoredVersions field, we could only iterate over the installed versions. 
for _, version := range crd.Spec.Versions { - k.log.Debugf("Creating backup of CRs for %q at version %q", crd.Name, version.Name) + k.log.Debug(fmt.Sprintf("Creating backup of CRs for %q at version %q", crd.Name, version.Name)) gvr := schema.GroupVersionResource{Group: crd.Spec.Group, Version: version.Name, Resource: crd.Spec.Names.Plural} crs, err := k.kubectl.ListCRs(ctx, gvr) @@ -80,7 +80,7 @@ func (k *KubeCmd) BackupCRs(ctx context.Context, fileHandler file.Handler, crds if !k8serrors.IsNotFound(err) { return fmt.Errorf("retrieving CR %s: %w", crd.Name, err) } - k.log.Debugf("No CRs found for %q at version %q, skipping...", crd.Name, version.Name) + k.log.Debug(fmt.Sprintf("No CRs found for %q at version %q, skipping...", crd.Name, version.Name)) continue } @@ -101,9 +101,9 @@ func (k *KubeCmd) BackupCRs(ctx context.Context, fileHandler file.Handler, crds } } - k.log.Debugf("Backup for resource type %q complete", crd.Name) + k.log.Debug(fmt.Sprintf("Backup for resource type %q complete", crd.Name)) } - k.log.Debugf("CR backup complete") + k.log.Debug("CR backup complete") return nil } diff --git a/internal/constellation/kubecmd/backup_test.go b/internal/constellation/kubecmd/backup_test.go index 21a3dc65b6..a95c26be58 100644 --- a/internal/constellation/kubecmd/backup_test.go +++ b/internal/constellation/kubecmd/backup_test.go @@ -166,8 +166,7 @@ func TestBackupCRs(t *testing.T) { type stubLog struct{} -func (s stubLog) Debugf(_ string, _ ...any) {} -func (s stubLog) Sync() {} +func (s stubLog) Debug(_ string, _ ...any) {} func (c stubKubectl) ListCRDs(_ context.Context) ([]apiextensionsv1.CustomResourceDefinition, error) { if c.getCRDsError != nil { diff --git a/internal/constellation/kubecmd/kubecmd.go b/internal/constellation/kubecmd/kubecmd.go index d2ea0d7f4e..dedb4539be 100644 --- a/internal/constellation/kubecmd/kubecmd.go +++ b/internal/constellation/kubecmd/kubecmd.go @@ -93,7 +93,7 @@ func (k *KubeCmd) UpgradeNodeImage(ctx context.Context, imageVersion semver.Semv return err } - k.log.Debugf("Checking if image upgrade is valid") + k.log.Debug("Checking if image upgrade is valid") var upgradeErr *compatibility.InvalidUpgradeError err = k.isValidImageUpgrade(nodeVersion, imageVersion.String(), force) switch { @@ -103,7 +103,7 @@ func (k *KubeCmd) UpgradeNodeImage(ctx context.Context, imageVersion semver.Semv return fmt.Errorf("updating image version: %w", err) } - k.log.Debugf("Updating local copy of nodeVersion image version from %s to %s", nodeVersion.Spec.ImageVersion, imageVersion.String()) + k.log.Debug(fmt.Sprintf("Updating local copy of nodeVersion image version from %s to %s", nodeVersion.Spec.ImageVersion, imageVersion.String())) nodeVersion.Spec.ImageReference = imageReference nodeVersion.Spec.ImageVersion = imageVersion.String() @@ -214,20 +214,20 @@ func (k *KubeCmd) ApplyJoinConfig(ctx context.Context, newAttestConfig config.At return fmt.Errorf("getting %s ConfigMap: %w", constants.JoinConfigMap, err) } - k.log.Debugf("ConfigMap %q does not exist in namespace %q, creating it now", constants.JoinConfigMap, constants.ConstellationNamespace) + k.log.Debug(fmt.Sprintf("ConfigMap %q does not exist in namespace %q, creating it now", constants.JoinConfigMap, constants.ConstellationNamespace)) if err := retryAction(ctx, k.retryInterval, maxRetryAttempts, func(ctx context.Context) error { return k.kubectl.CreateConfigMap(ctx, joinConfigMap(newConfigJSON, measurementSalt)) }, k.log); err != nil { return fmt.Errorf("creating join-config ConfigMap: %w", err) } - 
k.log.Debugf("Created %q ConfigMap in namespace %q", constants.JoinConfigMap, constants.ConstellationNamespace) + k.log.Debug(fmt.Sprintf("Created %q ConfigMap in namespace %q", constants.JoinConfigMap, constants.ConstellationNamespace)) return nil } // create backup of previous config joinConfig.Data[constants.AttestationConfigFilename+"_backup"] = joinConfig.Data[constants.AttestationConfigFilename] joinConfig.Data[constants.AttestationConfigFilename] = string(newConfigJSON) - k.log.Debugf("Triggering attestation config update now") + k.log.Debug("Triggering attestation config update now") if err := retryAction(ctx, k.retryInterval, maxRetryAttempts, func(ctx context.Context) error { _, err = k.kubectl.UpdateConfigMap(ctx, joinConfig) return err @@ -263,10 +263,10 @@ func (k *KubeCmd) ExtendClusterConfigCertSANs(ctx context.Context, alternativeNa } if len(missingSANs) == 0 { - k.log.Debugf("No new SANs to add to the cluster's apiserver SAN field") + k.log.Debug("No new SANs to add to the cluster's apiserver SAN field") return nil } - k.log.Debugf("Extending the cluster's apiserver SAN field with the following SANs: %s\n", strings.Join(missingSANs, ", ")) + k.log.Debug(fmt.Sprintf("Extending the cluster's apiserver SAN field with the following SANs: %s\n", strings.Join(missingSANs, ", "))) clusterConfiguration.APIServer.CertSANs = append(clusterConfiguration.APIServer.CertSANs, missingSANs...) sort.Strings(clusterConfiguration.APIServer.CertSANs) @@ -277,12 +277,12 @@ func (k *KubeCmd) ExtendClusterConfigCertSANs(ctx context.Context, alternativeNa } kubeadmConfig.Data[constants.ClusterConfigurationKey] = string(newConfigYAML) - k.log.Debugf("Triggering kubeadm config update now") + k.log.Debug("Triggering kubeadm config update now") if _, err = k.kubectl.UpdateConfigMap(ctx, kubeadmConfig); err != nil { return fmt.Errorf("setting new kubeadm config: %w", err) } - k.log.Debugf("Successfully extended the cluster's apiserver SAN field") + k.log.Debug("Successfully extended the cluster's apiserver SAN field") return nil } @@ -345,7 +345,7 @@ func (k *KubeCmd) applyComponentsCM(ctx context.Context, components *corev1.Conf } func (k *KubeCmd) applyNodeVersion(ctx context.Context, nodeVersion updatev1alpha1.NodeVersion) (updatev1alpha1.NodeVersion, error) { - k.log.Debugf("Triggering NodeVersion upgrade now") + k.log.Debug("Triggering NodeVersion upgrade now") var updatedNodeVersion updatev1alpha1.NodeVersion err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { newNode, err := k.getConstellationVersion(ctx) @@ -409,7 +409,7 @@ func (k *KubeCmd) prepareUpdateK8s(nodeVersion *updatev1alpha1.NodeVersion, newC } } - k.log.Debugf("Updating local copy of nodeVersion Kubernetes version from %s to %s", nodeVersion.Spec.KubernetesClusterVersion, newClusterVersion) + k.log.Debug(fmt.Sprintf("Updating local copy of nodeVersion Kubernetes version from %s to %s", nodeVersion.Spec.KubernetesClusterVersion, newClusterVersion)) nodeVersion.Spec.KubernetesComponentsReference = configMap.ObjectMeta.Name nodeVersion.Spec.KubernetesClusterVersion = newClusterVersion @@ -461,7 +461,7 @@ func retryGetJoinConfig(ctx context.Context, kubectl kubectlInterface, retryInte return false } retries++ - log.Debugf("Getting join-config ConfigMap failed (attempt %d/%d): %s", retries, maxRetryAttempts, err) + log.Debug(fmt.Sprintf("Getting join-config ConfigMap failed (attempt %d/%d): %s", retries, maxRetryAttempts, err)) return retries < maxRetryAttempts } @@ -483,7 +483,7 @@ func retryAction(ctx context.Context, 
retryInterval time.Duration, maxRetries in ctr := 0 retrier := conretry.NewIntervalRetrier(&kubeDoer{action: action}, retryInterval, func(err error) bool { ctr++ - log.Debugf("Action failed (attempt %d/%d): %s", ctr, maxRetries, err) + log.Debug(fmt.Sprintf("Action failed (attempt %d/%d): %s", ctr, maxRetries, err)) return ctr < maxRetries }) return retrier.Do(ctx) @@ -502,5 +502,5 @@ type kubectlInterface interface { } type debugLog interface { - Debugf(format string, args ...any) + Debug(msg string, args ...any) } diff --git a/internal/grpc/grpclog/grpclog.go b/internal/grpc/grpclog/grpclog.go index aceb4af46d..e29c990b6a 100644 --- a/internal/grpc/grpclog/grpclog.go +++ b/internal/grpc/grpclog/grpclog.go @@ -9,6 +9,7 @@ package grpclog import ( "context" + "fmt" "sync" "google.golang.org/grpc/connectivity" @@ -30,15 +31,15 @@ func LogStateChangesUntilReady(ctx context.Context, conn getStater, log debugLog go func() { defer wg.Done() state := conn.GetState() - log.Debugf("Connection state started as %s", state) + log.Debug(fmt.Sprintf("Connection state started as %s", state)) for ; state != connectivity.Ready && conn.WaitForStateChange(ctx, state); state = conn.GetState() { - log.Debugf("Connection state changed to %s", state) + log.Debug(fmt.Sprintf("Connection state changed to %s", state)) } if state == connectivity.Ready { - log.Debugf("Connection ready") + log.Debug("Connection ready") isReadyCallback() } else { - log.Debugf("Connection state ended with %s", state) + log.Debug(fmt.Sprintf("Connection state ended with %s", state)) } }() } @@ -49,5 +50,5 @@ type getStater interface { } type debugLog interface { - Debugf(format string, args ...any) + Debug(msg string, args ...any) } diff --git a/internal/grpc/grpclog/grpclog_test.go b/internal/grpc/grpclog/grpclog_test.go index 7460de7d9c..704f1a9237 100644 --- a/internal/grpc/grpclog/grpclog_test.go +++ b/internal/grpc/grpclog/grpclog_test.go @@ -9,7 +9,6 @@ package grpclog import ( "context" - "fmt" "sync" "testing" @@ -88,8 +87,8 @@ type spyLog struct { msgs []string } -func (f *spyLog) Debugf(format string, args ...any) { - f.msgs = append(f.msgs, fmt.Sprintf(format, args...)) +func (f *spyLog) Debug(msg string, _ ...any) { + f.msgs = append(f.msgs, msg) } type stubConn struct { diff --git a/internal/logger/BUILD.bazel b/internal/logger/BUILD.bazel index f1b95ba193..4b8daad967 100644 --- a/internal/logger/BUILD.bazel +++ b/internal/logger/BUILD.bazel @@ -5,6 +5,7 @@ go_library( srcs = [ "cmdline.go", "grpclogger.go", + "levelhandler.go", "log.go", ], importpath = "github.com/edgelesssys/constellation/v2/internal/logger", @@ -13,8 +14,5 @@ go_library( "@com_github_grpc_ecosystem_go_grpc_middleware_v2//interceptors/logging", "@org_golang_google_grpc//:go_default_library", "@org_golang_google_grpc//grpclog", - "@org_uber_go_zap//:zap", - "@org_uber_go_zap//zapcore", - "@org_uber_go_zap//zaptest", ], ) diff --git a/internal/logger/cmdline.go b/internal/logger/cmdline.go index 4957e05cad..f03b773b06 100644 --- a/internal/logger/cmdline.go +++ b/internal/logger/cmdline.go @@ -7,25 +7,24 @@ SPDX-License-Identifier: AGPL-3.0-only package logger import ( - "go.uber.org/zap" - "go.uber.org/zap/zapcore" + "log/slog" ) // CmdLineVerbosityDescription explains numeric log levels. -const CmdLineVerbosityDescription = "log verbosity in zap logging levels. 
Use -1 for debug information, 0 for info, 1 for warn, 2 for error" +const CmdLineVerbosityDescription = "log verbosity: Use -1 for debug information, 0 for info, 1 for warn, 2 for error" -// VerbosityFromInt converts a verbosity level from an integer to a zapcore.Level. -func VerbosityFromInt(verbosity int) zapcore.Level { +// VerbosityFromInt converts a verbosity level from an integer to a slog.Level. +func VerbosityFromInt(verbosity int) slog.Level { switch { case verbosity <= -1: - return zap.DebugLevel + return slog.LevelDebug case verbosity == 0: - return zap.InfoLevel + return slog.LevelInfo case verbosity == 1: - return zap.WarnLevel + return slog.LevelWarn case verbosity >= 2: - return zap.ErrorLevel + return slog.LevelError default: - return zap.InfoLevel + return slog.LevelInfo } } diff --git a/internal/logger/grpclogger.go b/internal/logger/grpclogger.go index 3381df30ad..716b0e495f 100644 --- a/internal/logger/grpclogger.go +++ b/internal/logger/grpclogger.go @@ -7,71 +7,92 @@ SPDX-License-Identifier: AGPL-3.0-only package logger import ( + "context" "fmt" + "log/slog" + "os" + "runtime" + "time" - "go.uber.org/zap" "google.golang.org/grpc/grpclog" ) -func replaceGRPCLogger(log *zap.Logger) { +func replaceGRPCLogger(log *slog.Logger) { gl := &grpcLogger{ - logger: log.With(zap.String("system", "grpc"), zap.Bool("grpc_log", true)).WithOptions(zap.AddCallerSkip(2)), + logger: log.With(slog.String("system", "grpc"), slog.Bool("grpc_log", true)), verbosity: 0, } grpclog.SetLoggerV2(gl) } +func (l *grpcLogger) log(level slog.Level, args ...interface{}) { + var pcs [1]uintptr + runtime.Callers(3, pcs[:]) + r := slog.NewRecord(time.Now(), level, fmt.Sprint(args...), pcs[0]) + _ = l.logger.Handler().Handle(context.Background(), r) +} + +func (l *grpcLogger) logf(level slog.Level, format string, args ...interface{}) { + var pcs [1]uintptr + runtime.Callers(3, pcs[:]) + r := slog.NewRecord(time.Now(), level, fmt.Sprintf(format, args...), pcs[0]) + _ = l.logger.Handler().Handle(context.Background(), r) +} + type grpcLogger struct { - logger *zap.Logger + logger *slog.Logger verbosity int } func (l *grpcLogger) Info(args ...interface{}) { - l.logger.Info(fmt.Sprint(args...)) + l.log(slog.LevelInfo, args...) } func (l *grpcLogger) Infoln(args ...interface{}) { - l.logger.Info(fmt.Sprint(args...)) + l.log(slog.LevelInfo, args...) } func (l *grpcLogger) Infof(format string, args ...interface{}) { - l.logger.Info(fmt.Sprintf(format, args...)) + l.logf(slog.LevelInfo, format, args...) } func (l *grpcLogger) Warning(args ...interface{}) { - l.logger.Warn(fmt.Sprint(args...)) + l.log(slog.LevelWarn, args...) } func (l *grpcLogger) Warningln(args ...interface{}) { - l.logger.Warn(fmt.Sprint(args...)) + l.log(slog.LevelWarn, args...) } func (l *grpcLogger) Warningf(format string, args ...interface{}) { - l.logger.Warn(fmt.Sprintf(format, args...)) + l.logf(slog.LevelWarn, format, args...) } func (l *grpcLogger) Error(args ...interface{}) { - l.logger.Error(fmt.Sprint(args...)) + l.log(slog.LevelError, args...) } func (l *grpcLogger) Errorln(args ...interface{}) { - l.logger.Error(fmt.Sprint(args...)) + l.log(slog.LevelError, args...) } func (l *grpcLogger) Errorf(format string, args ...interface{}) { - l.logger.Error(fmt.Sprintf(format, args...)) + l.logf(slog.LevelError, format, args...) } func (l *grpcLogger) Fatal(args ...interface{}) { - l.logger.Fatal(fmt.Sprint(args...)) + l.log(slog.LevelError, args...) 
+ os.Exit(1) } func (l *grpcLogger) Fatalln(args ...interface{}) { - l.logger.Fatal(fmt.Sprint(args...)) + l.log(slog.LevelError, args...) + os.Exit(1) } func (l *grpcLogger) Fatalf(format string, args ...interface{}) { - l.logger.Fatal(fmt.Sprintf(format, args...)) + l.logf(slog.LevelError, format, args...) + os.Exit(1) } func (l *grpcLogger) V(level int) bool { diff --git a/internal/logger/levelhandler.go b/internal/logger/levelhandler.go new file mode 100644 index 0000000000..f0e4e15441 --- /dev/null +++ b/internal/logger/levelhandler.go @@ -0,0 +1,57 @@ +/* +Copyright (c) Edgeless Systems GmbH + +SPDX-License-Identifier: AGPL-3.0-only +*/ + +package logger + +import ( + "context" + "log/slog" +) + +// LevelHandler copied from the official LevelHandler example in the slog package documentation. + +// LevelHandler wraps a Handler with an Enabled method +// that returns false for levels below a minimum. +type LevelHandler struct { + level slog.Leveler + handler slog.Handler +} + +// NewLevelHandler returns a LevelHandler with the given level. +// All methods except Enabled delegate to h. +func NewLevelHandler(level slog.Leveler, h slog.Handler) *LevelHandler { + // Optimization: avoid chains of LevelHandlers. + if lh, ok := h.(*LevelHandler); ok { + h = lh.Handler() + } + return &LevelHandler{level, h} +} + +// Enabled implements Handler.Enabled by reporting whether +// level is at least as large as h's level. +func (h *LevelHandler) Enabled(_ context.Context, level slog.Level) bool { + return level >= h.level.Level() +} + +// Handle implements Handler.Handle. +func (h *LevelHandler) Handle(ctx context.Context, r slog.Record) error { + return h.handler.Handle(ctx, r) +} + +// WithAttrs implements Handler.WithAttrs. +func (h *LevelHandler) WithAttrs(attrs []slog.Attr) slog.Handler { + return NewLevelHandler(h.level, h.handler.WithAttrs(attrs)) +} + +// WithGroup implements Handler.WithGroup. +func (h *LevelHandler) WithGroup(name string) slog.Handler { + return NewLevelHandler(h.level, h.handler.WithGroup(name)) +} + +// Handler returns the Handler wrapped by h. +func (h *LevelHandler) Handler() slog.Handler { + return h.handler +} diff --git a/internal/logger/log.go b/internal/logger/log.go index 55a9cb9c1e..26c891009c 100644 --- a/internal/logger/log.go +++ b/internal/logger/log.go @@ -5,223 +5,118 @@ SPDX-License-Identifier: AGPL-3.0-only */ /* -Package logger provides logging functionality for Constellation services. -It is a thin wrapper around the zap package, providing a consistent interface for logging. -Use this package to implement logging for your Constellation services. +Package logger provides helper functions for use with slog, extending its functionality and making +slog easier to work with. -# Usage +1. Logging in unit tests -1. Create a logger using New(). +To log in unit tests, create a slog logger that writes to the test's output through this package's +testWriter by calling `logger.NewTest(t)`. -2. Defer the Sync() method to ensure that all log entries are flushed. +2. Deriving a logger with an increased log level from an existing logger -3. Use the Debugf(), Infof(), Warnf(), Errorf(), and Fatalf() methods depending on the level of logging you need. +To raise the log level, wrap the existing logger's handler in this package's LevelHandler +and construct a new slog.Logger from it.
As an example, if you have a slog.Logger named `log` you can create a +new logger with an increased log level (here slog.LevelWarn) like this: -4. Use the Named() method to create a named child logger. - -5. Use the With() method to create a child logger with structured context. -This can also be used to add context to a single log message: - - logger.With(zap.String("key", "value")).Infof("log message") - -# Log Levels - -Use [Logger.Debugf] to log low level and detailed information that is useful for debugging. - -Use [Logger.Infof] to log general information. This method is correct for most logging purposes. - -Use [Logger.Warnf] to log information that may indicate unwanted behavior, but is not an error. - -Use [Logger.Errorf] to log information about any errors that occurred. - -Use [Logger.Fatalf] to log information about any errors that occurred and then exit the program. + slog.New(logger.NewLevelHandler(slog.LevelWarn, log.Handler())) */ package logger import ( "context" - "fmt" + "log/slog" "os" + "runtime" "testing" + "time" "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/logging" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" - "go.uber.org/zap/zaptest" "google.golang.org/grpc" ) -// LogType indicates the output encoding of the log. -type LogType int - -const ( - // JSONLog encodes logs in JSON format. - JSONLog LogType = iota - // PlainLog encodes logs as human readable text. - PlainLog -) - -// Logger is a wrapper for zap logger. -// The purpose is to provide a simple interface for logging with sensible defaults. -type Logger struct { - logger *zap.SugaredLogger -} - -// New creates a new Logger. -// Set name to an empty string to create an unnamed logger. -func New(logType LogType, logLevel zapcore.Level) *Logger { - encoderCfg := zap.NewProductionEncoderConfig() - encoderCfg.StacktraceKey = zapcore.OmitKey - encoderCfg.EncodeLevel = zapcore.CapitalLevelEncoder - encoderCfg.EncodeTime = zapcore.RFC3339TimeEncoder - - var encoder zapcore.Encoder - if logType == PlainLog { - encoder = zapcore.NewConsoleEncoder(encoderCfg) - } else { - encoder = zapcore.NewJSONEncoder(encoderCfg) - } - - logCore := zapcore.NewCore(encoder, zapcore.Lock(os.Stderr), zap.NewAtomicLevelAt(logLevel)) - - logger := zap.New( - logCore, - zap.AddCaller(), // add the file and line number of the logging call - zap.AddCallerSkip(1), // skip the first caller so that we don't only see this package as the caller - ) - - return &Logger{logger: logger.Sugar()} -} - -// NewTest creates a logger for unit / integration tests. -func NewTest(t *testing.T) *Logger { - return &Logger{ - logger: zaptest.NewLogger(t).Sugar().Named(fmt.Sprintf("%q", t.Name())), - } -} - -// Debugf logs a message at Debug level. -// Debug logs are typically voluminous, and contain detailed information on the flow of execution. -func (l *Logger) Debugf(format string, args ...any) { - l.logger.Debugf(format, args...) -} - -// Infof logs a message at Info level. -// This is the default logging priority and should be used for all normal messages. -func (l *Logger) Infof(format string, args ...any) { - l.logger.Infof(format, args...) -} - -// Warnf logs a message at Warn level. -// Warn logs are more important than Info, but they don't need human review or necessarily indicate an error. -func (l *Logger) Warnf(format string, args ...any) { - l.logger.Warnf(format, args...) -} - -// Errorf logs a message at Error level. -// Error logs are high priority and indicate something has gone wrong. 
-func (l *Logger) Errorf(format string, args ...any) { - l.logger.Errorf(format, args...) -} - -// Fatalf logs the message and then calls os.Exit(1). -// Use this to exit your program when a fatal error occurs. -func (l *Logger) Fatalf(format string, args ...any) { - l.logger.Fatalf(format, args...) -} - -// Sync flushes any buffered log entries. -// Applications should take care to call Sync before exiting. -func (l *Logger) Sync() { - _ = l.logger.Sync() -} - -// WithIncreasedLevel returns a logger with increased logging level. -func (l *Logger) WithIncreasedLevel(level zapcore.Level) *Logger { - return &Logger{logger: l.getZapLogger().WithOptions(zap.IncreaseLevel(level)).Sugar()} -} - -// With returns a logger with structured context. -func (l *Logger) With(fields ...any) *Logger { - return &Logger{logger: l.logger.With(fields...)} -} - -// Named returns a named logger. -func (l *Logger) Named(name string) *Logger { - return &Logger{logger: l.logger.Named(name)} -} - // ReplaceGRPCLogger replaces grpc's internal logger with the given logger. -func (l *Logger) ReplaceGRPCLogger() { - replaceGRPCLogger(l.getZapLogger()) +func ReplaceGRPCLogger(l *slog.Logger) { + replaceGRPCLogger(l) } // GetServerUnaryInterceptor returns a gRPC server option for intercepting unary gRPC logs. -func (l *Logger) GetServerUnaryInterceptor() grpc.ServerOption { +func GetServerUnaryInterceptor(l *slog.Logger) grpc.ServerOption { return grpc.UnaryInterceptor( - logging.UnaryServerInterceptor(l.middlewareLogger()), + logging.UnaryServerInterceptor(middlewareLogger(l)), ) } // GetServerStreamInterceptor returns a gRPC server option for intercepting streaming gRPC logs. -func (l *Logger) GetServerStreamInterceptor() grpc.ServerOption { +func GetServerStreamInterceptor(l *slog.Logger) grpc.ServerOption { return grpc.StreamInterceptor( - logging.StreamServerInterceptor(l.middlewareLogger()), + logging.StreamServerInterceptor(middlewareLogger(l)), ) } // GetClientUnaryInterceptor returns a gRPC client option for intercepting unary gRPC logs. -func (l *Logger) GetClientUnaryInterceptor() grpc.DialOption { +func GetClientUnaryInterceptor(l *slog.Logger) grpc.DialOption { return grpc.WithUnaryInterceptor( - logging.UnaryClientInterceptor(l.middlewareLogger()), + logging.UnaryClientInterceptor(middlewareLogger(l)), ) } // GetClientStreamInterceptor returns a gRPC client option for intercepting stream gRPC logs. -func (l *Logger) GetClientStreamInterceptor() grpc.DialOption { +func GetClientStreamInterceptor(l *slog.Logger) grpc.DialOption { return grpc.WithStreamInterceptor( - logging.StreamClientInterceptor(l.middlewareLogger()), + logging.StreamClientInterceptor(middlewareLogger(l)), ) } -// getZapLogger returns the underlying zap logger. -func (l *Logger) getZapLogger() *zap.Logger { - return l.logger.Desugar() -} - -func (l *Logger) middlewareLogger() logging.Logger { +func middlewareLogger(l *slog.Logger) logging.Logger { return logging.LoggerFunc(func(ctx context.Context, lvl logging.Level, msg string, fields ...any) { - f := make([]zap.Field, 0, len(fields)/2) - - for i := 0; i < len(fields); i += 2 { - key := fields[i] - value := fields[i+1] - - switch v := value.(type) { - case string: - f = append(f, zap.String(key.(string), v)) - case int: - f = append(f, zap.Int(key.(string), v)) - case bool: - f = append(f, zap.Bool(key.(string), v)) - default: - f = append(f, zap.Any(key.(string), v)) - } - } - - logger := l.getZapLogger().WithOptions(zap.AddCallerSkip(1)).With(f...) 
+ var pcs [1]uintptr + runtime.Callers(2, pcs[:]) // skip [Callers, LoggerFunc] + level := slog.LevelDebug switch lvl { case logging.LevelDebug: - logger.Debug(msg) + break case logging.LevelInfo: - logger.Info(msg) + level = slog.LevelInfo case logging.LevelWarn: - logger.Warn(msg) + level = slog.LevelWarn case logging.LevelError: - logger.Error(msg) + level = slog.LevelError default: - panic(fmt.Sprintf("unknown level %v", lvl)) + level = slog.LevelError } + + r := slog.NewRecord(time.Now(), level, msg, pcs[0]) + r.Add(fields...) + _ = l.Handler().Handle(context.Background(), r) }) } + +// NewTextLogger creates a new slog.Logger that writes text formatted log messages +// to os.Stderr. +func NewTextLogger(level slog.Level) *slog.Logger { + return slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{AddSource: true, Level: level})) +} + +// NewJSONLogger creates a new slog.Logger that writes JSON formatted log messages +// to os.Stderr. +func NewJSONLogger(level slog.Level) *slog.Logger { + return slog.New(slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{AddSource: true, Level: level})) +} + +// NewTest creates a new slog.Logger that writes to a testing.T. +func NewTest(t *testing.T) *slog.Logger { + return slog.New(slog.NewTextHandler(testWriter{t: t}, &slog.HandlerOptions{AddSource: true})) +} + +// testWriter is a writer to a testing.T used in tests for logging with slog. +type testWriter struct { + t *testing.T +} + +func (t testWriter) Write(p []byte) (int, error) { + t.t.Helper() + t.t.Log(string(p)) + return len(p), nil +} diff --git a/internal/osimage/archive/BUILD.bazel b/internal/osimage/archive/BUILD.bazel index 122ceef0cb..4f90c37e4c 100644 --- a/internal/osimage/archive/BUILD.bazel +++ b/internal/osimage/archive/BUILD.bazel @@ -8,7 +8,6 @@ go_library( deps = [ "//internal/api/versionsapi", "//internal/constants", - "//internal/logger", "//internal/staticupload", "@com_github_aws_aws_sdk_go_v2_feature_s3_manager//:manager", "@com_github_aws_aws_sdk_go_v2_service_s3//:s3", diff --git a/internal/osimage/archive/archive.go b/internal/osimage/archive/archive.go index b31f202027..f42b48e4c1 100644 --- a/internal/osimage/archive/archive.go +++ b/internal/osimage/archive/archive.go @@ -9,7 +9,9 @@ package archive import ( "context" + "fmt" "io" + "log/slog" "net/url" "time" @@ -18,7 +20,6 @@ import ( s3types "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/edgelesssys/constellation/v2/internal/api/versionsapi" "github.com/edgelesssys/constellation/v2/internal/constants" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/internal/staticupload" ) @@ -29,11 +30,11 @@ type Archivist struct { // bucket is the name of the S3 bucket to use. bucket string - log *logger.Logger + log *slog.Logger } // New creates a new Archivist.
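// A caller-side sketch for the signature change below, using the helpers this
// patch adds to internal/logger (region, bucket, and distribution ID are
// placeholder values):
//
//	log := logger.NewJSONLogger(slog.LevelDebug)
//	archivist, closeFn, err := archive.New(ctx, "eu-central-1", "my-bucket", "<distribution-id>", log)
//	if err != nil {
//		log.With(slog.Any("error", err)).Error("Failed to create archivist")
//		os.Exit(1)
//	}
//	_ = closeFn // CloseFunc is unchanged by this patch; call it when done with operations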
-func New(ctx context.Context, region, bucket, distributionID string, log *logger.Logger) (*Archivist, CloseFunc, error) { +func New(ctx context.Context, region, bucket, distributionID string, log *slog.Logger) (*Archivist, CloseFunc, error) { staticUploadClient, staticUploadClientClose, err := staticupload.New(ctx, staticupload.Config{ Region: region, Bucket: bucket, @@ -73,7 +74,7 @@ func (a *Archivist) Archive(ctx context.Context, version versionsapi.Version, cs if err != nil { return "", err } - a.log.Debugf("Archiving OS image %s %s %v to s3://%v/%v", csp, attestationVariant, version.ShortPath(), a.bucket, key) + a.log.Debug(fmt.Sprintf("Archiving OS image %s %s %v to s3://%v/%v", csp, attestationVariant, version.ShortPath(), a.bucket, key)) _, err = a.uploadClient.Upload(ctx, &s3.PutObjectInput{ Bucket: &a.bucket, Key: &key, diff --git a/internal/osimage/imageinfo/BUILD.bazel b/internal/osimage/imageinfo/BUILD.bazel index 7c9e5c386b..2a13b111ea 100644 --- a/internal/osimage/imageinfo/BUILD.bazel +++ b/internal/osimage/imageinfo/BUILD.bazel @@ -8,7 +8,6 @@ go_library( deps = [ "//internal/api/versionsapi", "//internal/constants", - "//internal/logger", "//internal/staticupload", "@com_github_aws_aws_sdk_go_v2_feature_s3_manager//:manager", "@com_github_aws_aws_sdk_go_v2_service_s3//:s3", diff --git a/internal/osimage/imageinfo/imageinfo.go b/internal/osimage/imageinfo/imageinfo.go index ed5bea857b..a26ab24a53 100644 --- a/internal/osimage/imageinfo/imageinfo.go +++ b/internal/osimage/imageinfo/imageinfo.go @@ -12,6 +12,7 @@ import ( "context" "encoding/json" "fmt" + "log/slog" "net/url" "time" @@ -20,7 +21,6 @@ import ( s3types "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/edgelesssys/constellation/v2/internal/api/versionsapi" "github.com/edgelesssys/constellation/v2/internal/constants" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/internal/staticupload" ) @@ -31,11 +31,11 @@ type Uploader struct { // bucket is the name of the S3 bucket to use. bucket string - log *logger.Logger + log *slog.Logger } // New creates a new Uploader. 
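// For reference, a record emitted through logger.NewJSONLogger (which sets
// AddSource: true) renders roughly like this; all values are illustrative:
//
//	{"time":"2024-02-08T14:20:01Z","level":"INFO","source":{"function":"main.main","file":"main.go","line":42},"msg":"Constellation Node Join Service","version":"v2.15.0"}
//
// Attributes attached via With(...) at call sites, such as "version" above,
// appear as top-level JSON fields.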
-func New(ctx context.Context, region, bucket, distributionID string, log *logger.Logger) (*Uploader, CloseFunc, error) { +func New(ctx context.Context, region, bucket, distributionID string, log *slog.Logger) (*Uploader, CloseFunc, error) { staticUploadClient, staticUploadClientClose, err := staticupload.New(ctx, staticupload.Config{ Region: region, Bucket: bucket, @@ -78,7 +78,7 @@ func (a *Uploader) Upload(ctx context.Context, imageInfo versionsapi.ImageInfo) if err != nil { return "", err } - a.log.Debugf("Archiving image info to s3://%v/%v", a.bucket, key) + a.log.Debug(fmt.Sprintf("Archiving image info to s3://%v/%v", a.bucket, key)) buf := &bytes.Buffer{} if err := json.NewEncoder(buf).Encode(imageInfo); err != nil { return "", err diff --git a/internal/osimage/measurementsuploader/BUILD.bazel b/internal/osimage/measurementsuploader/BUILD.bazel index 5ce43727b0..25183bf35f 100644 --- a/internal/osimage/measurementsuploader/BUILD.bazel +++ b/internal/osimage/measurementsuploader/BUILD.bazel @@ -9,7 +9,6 @@ go_library( "//internal/api/versionsapi", "//internal/attestation/measurements", "//internal/constants", - "//internal/logger", "//internal/staticupload", "@com_github_aws_aws_sdk_go_v2_feature_s3_manager//:manager", "@com_github_aws_aws_sdk_go_v2_service_s3//:s3", diff --git a/internal/osimage/measurementsuploader/measurementsuploader.go b/internal/osimage/measurementsuploader/measurementsuploader.go index ab2bcce7bd..1e6c9ffa05 100644 --- a/internal/osimage/measurementsuploader/measurementsuploader.go +++ b/internal/osimage/measurementsuploader/measurementsuploader.go @@ -12,6 +12,7 @@ import ( "encoding/json" "fmt" "io" + "log/slog" "net/url" "time" @@ -21,7 +22,6 @@ import ( "github.com/edgelesssys/constellation/v2/internal/api/versionsapi" "github.com/edgelesssys/constellation/v2/internal/attestation/measurements" "github.com/edgelesssys/constellation/v2/internal/constants" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/internal/staticupload" ) @@ -32,11 +32,11 @@ type Uploader struct { // bucket is the name of the S3 bucket to use. bucket string - log *logger.Logger + log *slog.Logger } // New creates a new Uploader. 
-func New(ctx context.Context, region, bucket, distributionID string, log *logger.Logger) (*Uploader, CloseFunc, error) { +func New(ctx context.Context, region, bucket, distributionID string, log *slog.Logger) (*Uploader, CloseFunc, error) { staticUploadClient, staticUploadClientClose, err := staticupload.New(ctx, staticupload.Config{ Region: region, Bucket: bucket, @@ -92,7 +92,7 @@ func (a *Uploader) Upload(ctx context.Context, rawMeasurement, signature io.Read if err != nil { return "", "", err } - a.log.Debugf("Archiving image measurements to s3://%v/%v and s3://%v/%v", a.bucket, key, a.bucket, sigKey) + a.log.Debug(fmt.Sprintf("Archiving image measurements to s3://%v/%v and s3://%v/%v", a.bucket, key, a.bucket, sigKey)) if _, err = a.uploadClient.Upload(ctx, &s3.PutObjectInput{ Bucket: &a.bucket, Key: &key, diff --git a/internal/osimage/nop/BUILD.bazel b/internal/osimage/nop/BUILD.bazel index 6e5b5d9152..2dcd1bc549 100644 --- a/internal/osimage/nop/BUILD.bazel +++ b/internal/osimage/nop/BUILD.bazel @@ -7,7 +7,6 @@ go_library( visibility = ["//:__subpackages__"], deps = [ "//internal/api/versionsapi", - "//internal/logger", "//internal/osimage", ], ) diff --git a/internal/osimage/nop/nop.go b/internal/osimage/nop/nop.go index c329dd5eaf..5618acf038 100644 --- a/internal/osimage/nop/nop.go +++ b/internal/osimage/nop/nop.go @@ -9,24 +9,25 @@ package nop import ( "context" + "fmt" + "log/slog" "github.com/edgelesssys/constellation/v2/internal/api/versionsapi" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/internal/osimage" ) // Uploader is a no-op uploader. type Uploader struct { - log *logger.Logger + log *slog.Logger } // New creates a new Uploader. -func New(log *logger.Logger) *Uploader { +func New(log *slog.Logger) *Uploader { return &Uploader{log: log} } // Upload pretends to upload images to a csp. func (u *Uploader) Upload(_ context.Context, req *osimage.UploadRequest) ([]versionsapi.ImageInfoEntry, error) { - u.log.Debugf("Skipping image upload of %s since this CSP does not require images to be uploaded in advance.", req.Version.ShortPath()) + u.log.Debug(fmt.Sprintf("Skipping image upload of %s since this CSP does not require images to be uploaded in advance.", req.Version.ShortPath())) return nil, nil } diff --git a/internal/osimage/uplosi/BUILD.bazel b/internal/osimage/uplosi/BUILD.bazel index ad50cd8653..bbd70e880f 100644 --- a/internal/osimage/uplosi/BUILD.bazel +++ b/internal/osimage/uplosi/BUILD.bazel @@ -9,7 +9,6 @@ go_library( deps = [ "//internal/api/versionsapi", "//internal/cloud/cloudprovider", - "//internal/logger", "//internal/osimage", "@com_github_burntsushi_toml//:toml", ], diff --git a/internal/osimage/uplosi/uplosiupload.go b/internal/osimage/uplosi/uplosiupload.go index 0ef421645f..1c8f9bd9f5 100644 --- a/internal/osimage/uplosi/uplosiupload.go +++ b/internal/osimage/uplosi/uplosiupload.go @@ -13,6 +13,7 @@ import ( _ "embed" "errors" "fmt" + "log/slog" "os" "os/exec" "path/filepath" @@ -23,7 +24,6 @@ import ( "github.com/BurntSushi/toml" "github.com/edgelesssys/constellation/v2/internal/api/versionsapi" "github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/internal/osimage" ) @@ -36,11 +36,11 @@ const timestampFormat = "20060102150405" type Uploader struct { uplosiPath string - log *logger.Logger + log *slog.Logger } // New creates a new Uploader. 
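// zap's Named(name) has no direct slog counterpart; the call sites in this
// patch use WithGroup instead, which qualifies the keys of all attributes
// logged through the derived logger. A sketch against the constructor below
// (the binary path is a placeholder):
//
//	uploader := uplosi.New("/usr/bin/uplosi", log.WithGroup("uplosi"))
//
// With the text handler, an attribute logged as "image" inside the uploader
// then renders as "uplosi.image"; the JSON handler nests it under an "uplosi"
// object.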
-func New(uplosiPath string, log *logger.Logger) *Uploader { +func New(uplosiPath string, log *slog.Logger) *Uploader { return &Uploader{ uplosiPath: uplosiPath, log: log, diff --git a/internal/staticupload/BUILD.bazel b/internal/staticupload/BUILD.bazel index 38134eb394..2e54630b65 100644 --- a/internal/staticupload/BUILD.bazel +++ b/internal/staticupload/BUILD.bazel @@ -13,7 +13,6 @@ go_library( visibility = ["//:__subpackages__"], deps = [ "//internal/constants", - "//internal/logger", "@com_github_aws_aws_sdk_go_v2_config//:config", "@com_github_aws_aws_sdk_go_v2_feature_s3_manager//:manager", "@com_github_aws_aws_sdk_go_v2_service_cloudfront//:cloudfront", diff --git a/internal/staticupload/staticupload.go b/internal/staticupload/staticupload.go index 8b4e807bf5..fd09734ad9 100644 --- a/internal/staticupload/staticupload.go +++ b/internal/staticupload/staticupload.go @@ -15,6 +15,7 @@ package staticupload import ( "context" "fmt" + "log/slog" "strings" "sync" "time" @@ -25,7 +26,6 @@ import ( cftypes "github.com/aws/aws-sdk-go-v2/service/cloudfront/types" "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/edgelesssys/constellation/v2/internal/constants" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/google/uuid" ) @@ -45,7 +45,7 @@ type Client struct { dirtyKeys []string // invalidationIDs is a list of invalidation IDs that are currently in progress. invalidationIDs []string - logger *logger.Logger + logger *slog.Logger } // Config is the configuration for the Client. @@ -101,7 +101,7 @@ func (e *InvalidationError) Unwrap() error { } // New creates a new Client. Call CloseFunc when done with operations. -func New(ctx context.Context, config Config, log *logger.Logger) (*Client, CloseFunc, error) { +func New(ctx context.Context, config Config, log *slog.Logger) (*Client, CloseFunc, error) { config.SetsDefault() cfg, err := awsconfig.LoadDefaultConfig(ctx, awsconfig.WithRegion(config.Region)) if err != nil { @@ -134,7 +134,7 @@ func (c *Client) Flush(ctx context.Context) error { c.mux.Lock() defer c.mux.Unlock() - c.logger.Debugf("Invalidating keys: %s", c.dirtyKeys) + c.logger.Debug(fmt.Sprintf("Invalidating keys: %s", c.dirtyKeys)) if len(c.dirtyKeys) == 0 { return nil } @@ -214,12 +214,12 @@ func (c *Client) invalidateCacheForKeys(ctx context.Context, keys []string) (str // waitForInvalidations waits for all invalidations to finish. 
func (c *Client) waitForInvalidations(ctx context.Context) error { if c.cacheInvalidationWaitTimeout == 0 { - c.logger.Warnf("cacheInvalidationWaitTimeout set to 0, not waiting for invalidations to finish") + c.logger.Warn("cacheInvalidationWaitTimeout set to 0, not waiting for invalidations to finish") return nil } waiter := cloudfront.NewInvalidationCompletedWaiter(c.cdnClient) - c.logger.Debugf("Waiting for invalidations %s in distribution %s", c.invalidationIDs, c.distributionID) + c.logger.Debug(fmt.Sprintf("Waiting for invalidations %s in distribution %s", c.invalidationIDs, c.distributionID)) for _, invalidationID := range c.invalidationIDs { waitIn := &cloudfront.GetInvalidationInput{ DistributionId: &c.distributionID, @@ -230,7 +230,7 @@ func (c *Client) waitForInvalidations(ctx context.Context) error { } } - c.logger.Debugf("Invalidations finished") + c.logger.Debug("Invalidations finished") c.invalidationIDs = nil return nil } diff --git a/internal/verify/verify.go b/internal/verify/verify.go index fa41079f02..60b2e726e0 100644 --- a/internal/verify/verify.go +++ b/internal/verify/verify.go @@ -216,12 +216,12 @@ type Certificate struct { func newCertificates(certTypeName string, cert []byte, log debugLog) (certs []Certificate, err error) { newlinesTrimmed := strings.TrimSpace(string(cert)) - log.Debugf("Decoding PEM certificate: %s", certTypeName) + log.Debug(fmt.Sprintf("Decoding PEM certificate: %s", certTypeName)) i := 1 var rest []byte var block *pem.Block for block, rest = pem.Decode([]byte(newlinesTrimmed)); block != nil; block, rest = pem.Decode(rest) { - log.Debugf("Parsing PEM block: %d", i) + log.Debug(fmt.Sprintf("Parsing PEM block: %d", i)) if block.Type != "CERTIFICATE" { return certs, fmt.Errorf("parse %s: expected PEM block type 'CERTIFICATE', got '%s'", certTypeName, block.Type) } @@ -708,5 +708,5 @@ func httpGet(ctx context.Context, url string) ([]byte, error) { } type debugLog interface { - Debugf(format string, args ...any) + Debug(msg string, args ...any) } diff --git a/joinservice/cmd/BUILD.bazel b/joinservice/cmd/BUILD.bazel index c55b08cc75..572e12ca8b 100644 --- a/joinservice/cmd/BUILD.bazel +++ b/joinservice/cmd/BUILD.bazel @@ -29,7 +29,6 @@ go_library( "//joinservice/internal/server", "//joinservice/internal/watcher", "@com_github_spf13_afero//:afero", - "@org_uber_go_zap//:zap", ], ) diff --git a/joinservice/cmd/main.go b/joinservice/cmd/main.go index 435577dd7d..8aaab96540 100644 --- a/joinservice/cmd/main.go +++ b/joinservice/cmd/main.go @@ -10,7 +10,10 @@ import ( "context" "errors" "flag" + "fmt" + "log/slog" "net" + "os" "path/filepath" "strconv" "time" @@ -36,7 +39,6 @@ import ( "github.com/edgelesssys/constellation/v2/joinservice/internal/server" "github.com/edgelesssys/constellation/v2/joinservice/internal/watcher" "github.com/spf13/afero" - "go.uber.org/zap" ) // vpcIPTimeout is the maximum amount of time to wait for retrieval of the VPC ip. 
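slog defines no Fatal level, so the hunks below replace each zap Fatalf with an Error log followed by an explicit os.Exit(1), and zap.Error(err) becomes the generic attribute slog.Any("error", err). The recurring pattern as a self-contained sketch:

	package main

	import (
		"errors"
		"log/slog"
		"os"
	)

	func main() {
		log := slog.Default()
		if err := errors.New("example failure"); err != nil {
			// previously: log.With(zap.Error(err)).Fatalf("Failed to create Kubernetes client")
			log.With(slog.Any("error", err)).Error("Failed to create Kubernetes client")
			os.Exit(1)
		}
	}

As with zap's Fatal, os.Exit terminates the process immediately and skips deferred functions.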
@@ -49,35 +51,39 @@ func main() { verbosity := flag.Int("v", 0, logger.CmdLineVerbosityDescription) flag.Parse() - log := logger.New(logger.JSONLog, logger.VerbosityFromInt(*verbosity)) + log := logger.NewJSONLogger(logger.VerbosityFromInt(*verbosity)) log.With( - zap.String("version", constants.BinaryVersion().String()), - zap.String("cloudProvider", *provider), - zap.String("attestationVariant", *attestationVariant), - ).Infof("Constellation Node Join Service") + slog.String("version", constants.BinaryVersion().String()), + slog.String("cloudProvider", *provider), + slog.String("attestationVariant", *attestationVariant), + ).Info("Constellation Node Join Service") handler := file.NewHandler(afero.NewOsFs()) kubeClient, err := kubernetes.New() if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to create Kubernetes client") + log.With(slog.Any("error", err)).Error("Failed to create Kubernetes client") + os.Exit(1) } attVariant, err := variant.FromString(*attestationVariant) if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to parse attestation variant") + log.With(slog.Any("error", err)).Error("Failed to parse attestation variant") + os.Exit(1) } - certCacheClient := certcache.NewClient(log.Named("certcache"), kubeClient, attVariant) + certCacheClient := certcache.NewClient(log.WithGroup("certcache"), kubeClient, attVariant) cachedCerts, err := certCacheClient.CreateCertChainCache(context.Background()) if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to create certificate chain cache") + log.With(slog.Any("error", err)).Error("Failed to create certificate chain cache") + os.Exit(1) } - validator, err := watcher.NewValidator(log.Named("validator"), attVariant, handler, cachedCerts) + validator, err := watcher.NewValidator(log.WithGroup("validator"), attVariant, handler, cachedCerts) if err != nil { flag.Usage() - log.With(zap.Error(err)).Fatalf("Failed to create validator") + log.With(slog.Any("error", err)).Error("Failed to create validator") + os.Exit(1) } creds := atlscredentials.New(nil, []atls.Validator{validator}) @@ -87,47 +93,54 @@ func main() { vpcIP, err := getVPCIP(vpcCtx, *provider) if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to get IP in VPC") + log.With(slog.Any("error", err)).Error("Failed to get IP in VPC") + os.Exit(1) } apiServerEndpoint := net.JoinHostPort(vpcIP, strconv.Itoa(constants.KubernetesPort)) - kubeadm, err := kubeadm.New(apiServerEndpoint, log.Named("kubeadm")) + kubeadm, err := kubeadm.New(apiServerEndpoint, log.WithGroup("kubeadm")) if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to create kubeadm") + log.With(slog.Any("error", err)).Error("Failed to create kubeadm") + os.Exit(1) } - keyServiceClient := kms.New(log.Named("keyServiceClient"), *keyServiceEndpoint) + keyServiceClient := kms.New(log.WithGroup("keyServiceClient"), *keyServiceEndpoint) measurementSalt, err := handler.Read(filepath.Join(constants.ServiceBasePath, constants.MeasurementSaltFilename)) if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to read measurement salt") + log.With(slog.Any("error", err)).Error("Failed to read measurement salt") + os.Exit(1) } server, err := server.New( measurementSalt, - kubernetesca.New(log.Named("certificateAuthority"), handler), + kubernetesca.New(log.WithGroup("certificateAuthority"), handler), kubeadm, keyServiceClient, kubeClient, - log.Named("server"), + log.WithGroup("server"), ) if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to create server") + log.With(slog.Any("error", err)).Error("Failed to
create server") + os.Exit(1) } - watcher, err := watcher.New(log.Named("fileWatcher"), validator) + watcher, err := watcher.New(log.WithGroup("fileWatcher"), validator) if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to create watcher for measurements updates") + log.With(slog.Any("error", err)).Error("Failed to create watcher for measurements updates") + os.Exit(1) } defer watcher.Close() go func() { - log.Infof("starting file watcher for measurements file %s", filepath.Join(constants.ServiceBasePath, constants.AttestationConfigFilename)) + log.Info(fmt.Sprintf("starting file watcher for measurements file %s", filepath.Join(constants.ServiceBasePath, constants.AttestationConfigFilename))) if err := watcher.Watch(filepath.Join(constants.ServiceBasePath, constants.AttestationConfigFilename)); err != nil { - log.With(zap.Error(err)).Fatalf("Failed to watch measurements file") + log.With(slog.Any("error", err)).Error("Failed to watch measurements file") + os.Exit(1) } }() if err := server.Run(creds, strconv.Itoa(constants.JoinServicePort)); err != nil { - log.With(zap.Error(err)).Fatalf("Failed to run server") + log.With(slog.Any("error", err)).Error("Failed to run server") + os.Exit(1) } } diff --git a/joinservice/internal/certcache/BUILD.bazel b/joinservice/internal/certcache/BUILD.bazel index c5ba663dd3..3d07b94ca7 100644 --- a/joinservice/internal/certcache/BUILD.bazel +++ b/joinservice/internal/certcache/BUILD.bazel @@ -10,7 +10,6 @@ go_library( "//internal/attestation/variant", "//internal/constants", "//internal/crypto", - "//internal/logger", "//joinservice/internal/certcache/amdkds", "@com_github_google_go_sev_guest//abi", "@com_github_google_go_sev_guest//verify/trust", diff --git a/joinservice/internal/certcache/amdkds/amdkds_test.go b/joinservice/internal/certcache/amdkds/amdkds_test.go index e3b8787297..1ce3706a9a 100644 --- a/joinservice/internal/certcache/amdkds/amdkds_test.go +++ b/joinservice/internal/certcache/amdkds/amdkds_test.go @@ -7,6 +7,8 @@ SPDX-License-Identifier: AGPL-3.0-only package amdkds import ( + "fmt" + "log/slog" "testing" "github.com/edgelesssys/constellation/v2/internal/logger" @@ -63,12 +65,12 @@ func TestCertChain(t *testing.T) { } type stubGetter struct { - log *logger.Logger + log *slog.Logger ret []byte err error } func (s *stubGetter) Get(url string) ([]byte, error) { - s.log.Debugf("Request to %s", url) + s.log.Debug(fmt.Sprintf("Request to %s", url)) return s.ret, s.err } diff --git a/joinservice/internal/certcache/certcache.go b/joinservice/internal/certcache/certcache.go index fa14ca2054..cada6fd7c3 100644 --- a/joinservice/internal/certcache/certcache.go +++ b/joinservice/internal/certcache/certcache.go @@ -11,11 +11,11 @@ import ( "context" "crypto/x509" "fmt" + "log/slog" "github.com/edgelesssys/constellation/v2/internal/attestation/variant" "github.com/edgelesssys/constellation/v2/internal/constants" "github.com/edgelesssys/constellation/v2/internal/crypto" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/joinservice/internal/certcache/amdkds" "github.com/google/go-sev-guest/abi" "github.com/google/go-sev-guest/verify/trust" @@ -24,14 +24,14 @@ import ( // Client is a client for interacting with the certificate chain cache. type Client struct { - log *logger.Logger + log *slog.Logger attVariant variant.Variant kdsClient kubeClient kubeClient } // NewClient creates a new CertCacheClient. 
-func NewClient(log *logger.Logger, kubeClient kubeClient, attVariant variant.Variant) *Client { +func NewClient(log *slog.Logger, kubeClient kubeClient, attVariant variant.Variant) *Client { kdsClient := amdkds.NewKDSClient(trust.DefaultHTTPSGetter()) return &Client{ @@ -53,11 +53,11 @@ func (c *Client) CreateCertChainCache(ctx context.Context) (*CachedCerts, error) case variant.AWSSEVSNP{}: reportSigner = abi.VlekReportSigner default: - c.log.Debugf("No certificate chain caching possible for attestation variant %s", c.attVariant) + c.log.Debug(fmt.Sprintf("No certificate chain caching possible for attestation variant %s", c.attVariant)) return nil, nil } - c.log.Debugf("Creating %s certificate chain cache", c.attVariant) + c.log.Debug(fmt.Sprintf("Creating %s certificate chain cache", c.attVariant)) ask, ark, err := c.createCertChainCache(ctx, reportSigner) if err != nil { return nil, fmt.Errorf("creating %s certificate chain cache: %w", c.attVariant, err) @@ -84,7 +84,7 @@ func (c *CachedCerts) SevSnpCerts() (ask, ark *x509.Certificate) { // nothing is done and the existing ASK and ARK are returned. If the configmap already exists but either ASK or ARK // are missing, the missing certificate is retrieved from the KDS and the configmap is updated with the missing value. func (c *Client) createCertChainCache(ctx context.Context, signingType abi.ReportSigner) (ask, ark *x509.Certificate, err error) { - c.log.Debugf("Creating certificate chain cache") + c.log.Debug("Creating certificate chain cache") var shouldCreateConfigMap bool // Check if ASK or ARK is already cached. @@ -93,13 +93,13 @@ func (c *Client) createCertChainCache(ctx context.Context, signingType abi.Repor // KDS and update the configmap with the missing value. cacheAsk, cacheArk, err := c.getCertChainCache(ctx) if k8serrors.IsNotFound(err) { - c.log.Debugf("Certificate chain cache does not exist") + c.log.Debug("Certificate chain cache does not exist") shouldCreateConfigMap = true } else if err != nil { return nil, nil, fmt.Errorf("getting certificate chain cache: %w", err) } if cacheAsk != nil && cacheArk != nil { - c.log.Debugf("ASK and ARK present in cache, returning cached values") + c.log.Debug("ASK and ARK present in cache, returning cached values") return cacheAsk, cacheArk, nil } if cacheAsk != nil { @@ -110,7 +110,7 @@ func (c *Client) createCertChainCache(ctx context.Context, signingType abi.Repor } // If only one certificate is cached, retrieve the other one from the KDS. - c.log.Debugf("Retrieving certificate chain from KDS") + c.log.Debug("Retrieving certificate chain from KDS") kdsAsk, kdsArk, err := c.kdsClient.CertChain(signingType) if err != nil { return nil, nil, fmt.Errorf("retrieving certificate chain from KDS: %w", err) @@ -133,7 +133,7 @@ func (c *Client) createCertChainCache(ctx context.Context, signingType abi.Repor if shouldCreateConfigMap { // ConfigMap does not exist, create it. - c.log.Debugf("Creating certificate chain cache configmap") + c.log.Debug("Creating certificate chain cache configmap") if err := c.kubeClient.CreateConfigMap(ctx, constants.SevSnpCertCacheConfigMapName, map[string]string{ constants.CertCacheAskKey: string(askPem), constants.CertCacheArkKey: string(arkPem), @@ -141,7 +141,7 @@ func (c *Client) createCertChainCache(ctx context.Context, signingType abi.Repor // If the ConfigMap already exists, another JoinService instance created the certificate cache while this operation was running. // Calling this function again should now retrieve the cached certificates. 
if k8serrors.IsAlreadyExists(err) { - c.log.Debugf("Certificate chain cache configmap already exists, retrieving cached certificates") + c.log.Debug("Certificate chain cache configmap already exists, retrieving cached certificates") return c.getCertChainCache(ctx) } return nil, nil, fmt.Errorf("creating certificate chain cache configmap: %w", err) @@ -168,13 +168,13 @@ func (c *Client) createCertChainCache(ctx context.Context, signingType abi.Repor // getCertChainCache returns the cached ASK and ARK certificate, if available. If either of the keys // is not present in the configmap, no error is returned. func (c *Client) getCertChainCache(ctx context.Context) (ask, ark *x509.Certificate, err error) { - c.log.Debugf("Retrieving certificate chain from cache") + c.log.Debug("Retrieving certificate chain from cache") askRaw, err := c.kubeClient.GetConfigMapData(ctx, constants.SevSnpCertCacheConfigMapName, constants.CertCacheAskKey) if err != nil { return nil, nil, fmt.Errorf("getting ASK from configmap: %w", err) } if askRaw != "" { - c.log.Debugf("ASK cache hit") + c.log.Debug("ASK cache hit") ask, err = crypto.PemToX509Cert([]byte(askRaw)) if err != nil { return nil, nil, fmt.Errorf("parsing ASK: %w", err) @@ -186,7 +186,7 @@ func (c *Client) getCertChainCache(ctx context.Context) (ask, ark *x509.Certific return nil, nil, fmt.Errorf("getting ARK from configmap: %w", err) } if arkRaw != "" { - c.log.Debugf("ARK cache hit") + c.log.Debug("ARK cache hit") ark, err = crypto.PemToX509Cert([]byte(arkRaw)) if err != nil { return nil, nil, fmt.Errorf("parsing ARK: %w", err) diff --git a/joinservice/internal/kms/BUILD.bazel b/joinservice/internal/kms/BUILD.bazel index 966f0ad11a..35f6a6bbbf 100644 --- a/joinservice/internal/kms/BUILD.bazel +++ b/joinservice/internal/kms/BUILD.bazel @@ -7,11 +7,9 @@ go_library( importpath = "github.com/edgelesssys/constellation/v2/joinservice/internal/kms", visibility = ["//joinservice:__subpackages__"], deps = [ - "//internal/logger", "//keyservice/keyserviceproto", "@org_golang_google_grpc//:go_default_library", "@org_golang_google_grpc//credentials/insecure", - "@org_uber_go_zap//:zap", ], ) diff --git a/joinservice/internal/kms/kms.go b/joinservice/internal/kms/kms.go index f8bc3f87b5..a3a28147dd 100644 --- a/joinservice/internal/kms/kms.go +++ b/joinservice/internal/kms/kms.go @@ -10,23 +10,22 @@ package kms import ( "context" "fmt" + "log/slog" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/keyservice/keyserviceproto" - "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" ) // Client interacts with Constellation's keyservice. type Client struct { - log *logger.Logger + log *slog.Logger endpoint string grpc grpcClient } // New creates a new KMS. -func New(log *logger.Logger, endpoint string) Client { +func New(log *slog.Logger, endpoint string) Client { return Client{ log: log, endpoint: endpoint, @@ -36,17 +35,17 @@ func New(log *logger.Logger, endpoint string) Client { // GetDataKey returns a data encryption key for the given UUID. 
func (c Client) GetDataKey(ctx context.Context, keyID string, length int) ([]byte, error) { - log := c.log.With(zap.String("keyID", keyID), zap.String("endpoint", c.endpoint)) + log := c.log.With(slog.String("keyID", keyID), slog.String("endpoint", c.endpoint)) // the KMS does not use aTLS since traffic is only routed through the Constellation cluster // cluster internal connections are considered trustworthy - log.Infof("Connecting to KMS at %s", c.endpoint) + log.Info(fmt.Sprintf("Connecting to KMS at %s", c.endpoint)) conn, err := grpc.DialContext(ctx, c.endpoint, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { return nil, err } defer conn.Close() - log.Infof("Requesting data key") + log.Info("Requesting data key") res, err := c.grpc.GetDataKey( ctx, &keyserviceproto.GetDataKeyRequest{ @@ -59,7 +58,7 @@ func (c Client) GetDataKey(ctx context.Context, keyID string, length int) ([]byt return nil, fmt.Errorf("fetching data encryption key from Constellation KMS: %w", err) } - log.Infof("Data key request successful") + log.Info("Data key request successful") return res.DataKey, nil } diff --git a/joinservice/internal/kubeadm/BUILD.bazel b/joinservice/internal/kubeadm/BUILD.bazel index 10fb69340b..3bd980ac04 100644 --- a/joinservice/internal/kubeadm/BUILD.bazel +++ b/joinservice/internal/kubeadm/BUILD.bazel @@ -9,7 +9,6 @@ go_library( deps = [ "//internal/constants", "//internal/file", - "//internal/logger", "@com_github_spf13_afero//:afero", "@io_k8s_apimachinery//pkg/apis/meta/v1:meta", "@io_k8s_client_go//kubernetes", diff --git a/joinservice/internal/kubeadm/kubeadm.go b/joinservice/internal/kubeadm/kubeadm.go index 88b5019168..28ac087404 100644 --- a/joinservice/internal/kubeadm/kubeadm.go +++ b/joinservice/internal/kubeadm/kubeadm.go @@ -10,12 +10,12 @@ package kubeadm import ( "errors" "fmt" + "log/slog" "path/filepath" "time" "github.com/edgelesssys/constellation/v2/internal/constants" "github.com/edgelesssys/constellation/v2/internal/file" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/spf13/afero" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" @@ -35,13 +35,13 @@ import ( // Kubeadm manages joining of new nodes. type Kubeadm struct { apiServerEndpoint string - log *logger.Logger + log *slog.Logger client clientset.Interface file file.Handler } // New creates a new Kubeadm instance. -func New(apiServerEndpoint string, log *logger.Logger) (*Kubeadm, error) { +func New(apiServerEndpoint string, log *slog.Logger) (*Kubeadm, error) { config, err := rest.InClusterConfig() if err != nil { return nil, fmt.Errorf("failed to get in-cluster config: %w", err) @@ -62,7 +62,7 @@ func New(apiServerEndpoint string, log *logger.Logger) (*Kubeadm, error) { // GetJoinToken creates a new bootstrap (join) token, which a node can use to join the cluster. 
func (k *Kubeadm) GetJoinToken(ttl time.Duration) (*kubeadm.BootstrapTokenDiscovery, error) { - k.log.Infof("Generating new random bootstrap token") + k.log.Info("Generating new random bootstrap token") rawToken, err := bootstraputil.GenerateBootstrapToken() if err != nil { return nil, fmt.Errorf("couldn't generate random token: %w", err) @@ -80,13 +80,13 @@ func (k *Kubeadm) GetJoinToken(ttl time.Duration) (*kubeadm.BootstrapTokenDiscov } // create the token in Kubernetes - k.log.Infof("Creating bootstrap token in Kubernetes") + k.log.Info("Creating bootstrap token in Kubernetes") if err := tokenphase.CreateNewTokens(k.client, []bootstraptoken.BootstrapToken{token}); err != nil { return nil, fmt.Errorf("creating bootstrap token: %w", err) } // parse Kubernetes CA certs - k.log.Infof("Preparing join token for new node") + k.log.Info("Preparing join token for new node") rawConfig, err := k.file.Read(constants.ControlPlaneAdminConfFilename) if err != nil { return nil, fmt.Errorf("loading kubeconfig file: %w", err) @@ -108,7 +108,7 @@ func (k *Kubeadm) GetJoinToken(ttl time.Duration) (*kubeadm.BootstrapTokenDiscov publicKeyPins = append(publicKeyPins, pubkeypin.Hash(caCert)) } - k.log.Infof("Join token creation successful") + k.log.Info("Join token creation successful") return &kubeadm.BootstrapTokenDiscovery{ Token: tokenStr.String(), APIServerEndpoint: k.apiServerEndpoint, @@ -118,7 +118,7 @@ func (k *Kubeadm) GetJoinToken(ttl time.Duration) (*kubeadm.BootstrapTokenDiscov // GetControlPlaneCertificatesAndKeys loads the Kubernetes CA certificates and keys. func (k *Kubeadm) GetControlPlaneCertificatesAndKeys() (map[string][]byte, error) { - k.log.Infof("Loading control plane certificates and keys") + k.log.Info("Loading control plane certificates and keys") controlPlaneFiles := make(map[string][]byte) filenames := []string{ diff --git a/joinservice/internal/kubernetesca/BUILD.bazel b/joinservice/internal/kubernetesca/BUILD.bazel index 5b6edac35e..9de8ec46dc 100644 --- a/joinservice/internal/kubernetesca/BUILD.bazel +++ b/joinservice/internal/kubernetesca/BUILD.bazel @@ -9,7 +9,6 @@ go_library( deps = [ "//internal/crypto", "//internal/file", - "//internal/logger", "@io_k8s_kubernetes//cmd/kubeadm/app/constants", ], ) diff --git a/joinservice/internal/kubernetesca/kubernetesca.go b/joinservice/internal/kubernetesca/kubernetesca.go index 5a12e3f3bd..f9173b2f60 100644 --- a/joinservice/internal/kubernetesca/kubernetesca.go +++ b/joinservice/internal/kubernetesca/kubernetesca.go @@ -13,12 +13,12 @@ import ( "encoding/pem" "errors" "fmt" + "log/slog" "strings" "time" "github.com/edgelesssys/constellation/v2/internal/crypto" "github.com/edgelesssys/constellation/v2/internal/file" - "github.com/edgelesssys/constellation/v2/internal/logger" kubeconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" ) @@ -29,12 +29,12 @@ const ( // KubernetesCA handles signing of certificates using the Kubernetes root CA. type KubernetesCA struct { - log *logger.Logger + log *slog.Logger file file.Handler } // New creates a new KubernetesCA. -func New(log *logger.Logger, fileHandler file.Handler) *KubernetesCA { +func New(log *slog.Logger, fileHandler file.Handler) *KubernetesCA { return &KubernetesCA{ log: log, file: fileHandler, @@ -56,7 +56,7 @@ func (c KubernetesCA) GetNodeNameFromCSR(csr []byte) (string, error) { // GetCertificate creates a certificate for a node and signs it using the Kubernetes root CA. 
func (c KubernetesCA) GetCertificate(csr []byte) (cert []byte, err error) { - c.log.Debugf("Loading Kubernetes CA certificate") + c.log.Debug("Loading Kubernetes CA certificate") parentCertRaw, err := c.file.Read(caCertFilename) if err != nil { return nil, err @@ -66,7 +66,7 @@ func (c KubernetesCA) GetCertificate(csr []byte) (cert []byte, err error) { return nil, err } - c.log.Debugf("Loading Kubernetes CA private key") + c.log.Debug("Loading Kubernetes CA private key") parentKeyRaw, err := c.file.Read(caKeyFilename) if err != nil { return nil, err @@ -95,7 +95,7 @@ func (c KubernetesCA) GetCertificate(csr []byte) (cert []byte, err error) { return nil, err } - c.log.Infof("Creating kubelet certificate") + c.log.Info("Creating kubelet certificate") if len(certRequest.Subject.Organization) != 1 { return nil, errors.New("certificate request must have exactly one organization") } diff --git a/joinservice/internal/server/BUILD.bazel b/joinservice/internal/server/BUILD.bazel index f8ffb5dfb6..409c50c0e1 100644 --- a/joinservice/internal/server/BUILD.bazel +++ b/joinservice/internal/server/BUILD.bazel @@ -19,7 +19,6 @@ go_library( "@org_golang_google_grpc//codes", "@org_golang_google_grpc//credentials", "@org_golang_google_grpc//status", - "@org_uber_go_zap//:zap", ], ) diff --git a/joinservice/internal/server/server.go b/joinservice/internal/server/server.go index af80e96f47..0b8a98f106 100644 --- a/joinservice/internal/server/server.go +++ b/joinservice/internal/server/server.go @@ -10,6 +10,7 @@ package server import ( "context" "fmt" + "log/slog" "net" "time" @@ -20,7 +21,6 @@ import ( "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/internal/versions/components" "github.com/edgelesssys/constellation/v2/joinservice/joinproto" - "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" @@ -33,7 +33,7 @@ import ( type Server struct { measurementSalt []byte - log *logger.Logger + log *slog.Logger joinTokenGetter joinTokenGetter dataKeyGetter dataKeyGetter ca certificateAuthority @@ -44,7 +44,7 @@ type Server struct { // New initializes a new Server. func New( measurementSalt []byte, ca certificateAuthority, - joinTokenGetter joinTokenGetter, dataKeyGetter dataKeyGetter, kubeClient kubeClient, log *logger.Logger, + joinTokenGetter joinTokenGetter, dataKeyGetter dataKeyGetter, kubeClient kubeClient, log *slog.Logger, ) (*Server, error) { return &Server{ measurementSalt: measurementSalt, @@ -58,10 +58,10 @@ func New( // Run starts the gRPC server on the given port, using the provided tlsConfig. 
func (s *Server) Run(creds credentials.TransportCredentials, port string) error { - s.log.WithIncreasedLevel(zap.WarnLevel).Named("gRPC").ReplaceGRPCLogger() + logger.ReplaceGRPCLogger(slog.New(logger.NewLevelHandler(slog.LevelWarn, s.log.Handler())).WithGroup("gRPC")) grpcServer := grpc.NewServer( grpc.Creds(creds), - s.log.Named("gRPC").GetServerUnaryInterceptor(), + logger.GetServerUnaryInterceptor(s.log.WithGroup("gRPC")), ) joinproto.RegisterAPIServer(grpcServer, s) @@ -70,7 +70,7 @@ func (s *Server) Run(creds credentials.TransportCredentials, port string) error if err != nil { return fmt.Errorf("failed to listen: %s", err) } - s.log.Infof("Starting join service on %s", lis.Addr().String()) + s.log.Info(fmt.Sprintf("Starting join service on %s", lis.Addr().String())) return grpcServer.Serve(lis) } @@ -82,57 +82,57 @@ func (s *Server) Run(creds credentials.TransportCredentials, port string) error // In addition, control plane nodes receive: // - a decryption key for CA certificates uploaded to the Kubernetes cluster. func (s *Server) IssueJoinTicket(ctx context.Context, req *joinproto.IssueJoinTicketRequest) (*joinproto.IssueJoinTicketResponse, error) { - log := s.log.With(zap.String("peerAddress", grpclog.PeerAddrFromContext(ctx))) - log.Infof("IssueJoinTicket called") + log := s.log.With(slog.String("peerAddress", grpclog.PeerAddrFromContext(ctx))) + log.Info("IssueJoinTicket called") - log.Infof("Requesting measurement secret") + log.Info("Requesting measurement secret") measurementSecret, err := s.dataKeyGetter.GetDataKey(ctx, attestation.MeasurementSecretContext, crypto.DerivedKeyLengthDefault) if err != nil { - log.With(zap.Error(err)).Errorf("Failed to get measurement secret") + log.With(slog.Any("error", err)).Error("Failed to get measurement secret") return nil, status.Errorf(codes.Internal, "getting measurement secret: %s", err) } - log.Infof("Requesting disk encryption key") + log.Info("Requesting disk encryption key") stateDiskKey, err := s.dataKeyGetter.GetDataKey(ctx, req.DiskUuid, crypto.StateDiskKeyLength) if err != nil { - log.With(zap.Error(err)).Errorf("Failed to get key for stateful disk") + log.With(slog.Any("error", err)).Error("Failed to get key for stateful disk") return nil, status.Errorf(codes.Internal, "getting key for stateful disk: %s", err) } - log.Infof("Creating Kubernetes join token") + log.Info("Creating Kubernetes join token") kubeArgs, err := s.joinTokenGetter.GetJoinToken(constants.KubernetesJoinTokenTTL) if err != nil { - log.With(zap.Error(err)).Errorf("Failed to generate Kubernetes join arguments") + log.With(slog.Any("error", err)).Error("Failed to generate Kubernetes join arguments") return nil, status.Errorf(codes.Internal, "generating Kubernetes join arguments: %s", err) } - log.Infof("Querying NodeVersion custom resource for components ConfigMap name") + log.Info("Querying NodeVersion custom resource for components ConfigMap name") componentsConfigMapName, err := s.getK8sComponentsConfigMapName(ctx) if err != nil { - log.With(zap.Error(err)).Errorf("Failed getting components ConfigMap name") + log.With(slog.Any("error", err)).Error("Failed getting components ConfigMap name") return nil, status.Errorf(codes.Internal, "getting components ConfigMap name: %s", err) } - log.Infof("Querying %s ConfigMap for components", componentsConfigMapName) + log.Info(fmt.Sprintf("Querying %s ConfigMap for components", componentsConfigMapName)) components, err := s.kubeClient.GetComponents(ctx, componentsConfigMapName) if err != nil { - 
log.With(zap.Error(err)).Errorf("Failed getting components from ConfigMap") + log.With(slog.Any("error", err)).Error("Failed getting components from ConfigMap") return nil, status.Errorf(codes.Internal, "getting components: %s", err) } - log.Infof("Creating signed kubelet certificate") + log.Info("Creating signed kubelet certificate") kubeletCert, err := s.ca.GetCertificate(req.CertificateRequest) if err != nil { - log.With(zap.Error(err)).Errorf("Failed generating kubelet certificate") + log.With(slog.Any("error", err)).Error("Failed generating kubelet certificate") return nil, status.Errorf(codes.Internal, "Generating kubelet certificate: %s", err) } var controlPlaneFiles []*joinproto.ControlPlaneCertOrKey if req.IsControlPlane { - log.Infof("Loading control plane certificates and keys") + log.Info("Loading control plane certificates and keys") filesMap, err := s.joinTokenGetter.GetControlPlaneCertificatesAndKeys() if err != nil { - log.With(zap.Error(err)).Errorf("Failed to load control plane certificates and keys") + log.With(slog.Any("error", err)).Error("Failed to load control plane certificates and keys") return nil, status.Errorf(codes.Internal, "loading control-plane certificates and keys: %s", err) } @@ -146,16 +146,16 @@ func (s *Server) IssueJoinTicket(ctx context.Context, req *joinproto.IssueJoinTi nodeName, err := s.ca.GetNodeNameFromCSR(req.CertificateRequest) if err != nil { - log.With(zap.Error(err)).Errorf("Failed getting node name from CSR") + log.With(slog.Any("error", err)).Error("Failed getting node name from CSR") return nil, status.Errorf(codes.Internal, "getting node name from CSR: %s", err) } if err := s.kubeClient.AddNodeToJoiningNodes(ctx, nodeName, componentsConfigMapName, req.IsControlPlane); err != nil { - log.With(zap.Error(err)).Errorf("Failed adding node to joining nodes") + log.With(slog.Any("error", err)).Error("Failed adding node to joining nodes") return nil, status.Errorf(codes.Internal, "adding node to joining nodes: %s", err) } - log.Infof("IssueJoinTicket successful") + log.Info("IssueJoinTicket successful") return &joinproto.IssueJoinTicketResponse{ StateDiskKey: stateDiskKey, MeasurementSalt: s.measurementSalt, @@ -171,24 +171,24 @@ func (s *Server) IssueJoinTicket(ctx context.Context, req *joinproto.IssueJoinTi // IssueRejoinTicket issues a ticket for nodes to rejoin cluster. 
func (s *Server) IssueRejoinTicket(ctx context.Context, req *joinproto.IssueRejoinTicketRequest) (*joinproto.IssueRejoinTicketResponse, error) { - log := s.log.With(zap.String("peerAddress", grpclog.PeerAddrFromContext(ctx))) - log.Infof("IssueRejoinTicket called") + log := s.log.With(slog.String("peerAddress", grpclog.PeerAddrFromContext(ctx))) + log.Info("IssueRejoinTicket called") - log.Infof("Requesting measurement secret") + log.Info("Requesting measurement secret") measurementSecret, err := s.dataKeyGetter.GetDataKey(ctx, attestation.MeasurementSecretContext, crypto.DerivedKeyLengthDefault) if err != nil { - log.With(zap.Error(err)).Errorf("Unable to get measurement secret") + log.With(slog.Any("error", err)).Error("Unable to get measurement secret") return nil, status.Errorf(codes.Internal, "unable to get measurement secret: %s", err) } - log.Infof("Requesting disk encryption key") + log.Info("Requesting disk encryption key") stateDiskKey, err := s.dataKeyGetter.GetDataKey(ctx, req.DiskUuid, crypto.StateDiskKeyLength) if err != nil { - log.With(zap.Error(err)).Errorf("Unable to get key for stateful disk") + log.With(slog.Any("error", err)).Error("Unable to get key for stateful disk") return nil, status.Errorf(codes.Internal, "unable to get key for stateful disk: %s", err) } - log.Infof("IssueRejoinTicket successful") + log.Info("IssueRejoinTicket successful") return &joinproto.IssueRejoinTicketResponse{ StateDiskKey: stateDiskKey, MeasurementSecret: measurementSecret, diff --git a/joinservice/internal/watcher/BUILD.bazel b/joinservice/internal/watcher/BUILD.bazel index bd6366f51f..6ac22574b2 100644 --- a/joinservice/internal/watcher/BUILD.bazel +++ b/joinservice/internal/watcher/BUILD.bazel @@ -16,9 +16,7 @@ go_library( "//internal/config", "//internal/constants", "//internal/file", - "//internal/logger", "@com_github_fsnotify_fsnotify//:fsnotify", - "@org_uber_go_zap//:zap", ], ) diff --git a/joinservice/internal/watcher/validator.go b/joinservice/internal/watcher/validator.go index 65fc9dd76b..6bf43635ee 100644 --- a/joinservice/internal/watcher/validator.go +++ b/joinservice/internal/watcher/validator.go @@ -11,6 +11,7 @@ import ( "crypto/x509" "encoding/asn1" "fmt" + "log/slog" "path/filepath" "sync" @@ -20,12 +21,11 @@ import ( "github.com/edgelesssys/constellation/v2/internal/config" "github.com/edgelesssys/constellation/v2/internal/constants" "github.com/edgelesssys/constellation/v2/internal/file" - "github.com/edgelesssys/constellation/v2/internal/logger" ) // Updatable implements an updatable atls.Validator. type Updatable struct { - log *logger.Logger + log *slog.Logger mux sync.Mutex fileHandler file.Handler variant variant.Variant @@ -34,7 +34,7 @@ type Updatable struct { } // NewValidator initializes a new updatable validator and performs an initial update (aka. initialization). 
-func NewValidator(log *logger.Logger, variant variant.Variant, fileHandler file.Handler, cachedCerts cachedCerts) (*Updatable, error) { +func NewValidator(log *slog.Logger, variant variant.Variant, fileHandler file.Handler, cachedCerts cachedCerts) (*Updatable, error) { u := &Updatable{ log: log, fileHandler: fileHandler, @@ -69,7 +69,7 @@ func (u *Updatable) Update() error { u.mux.Lock() defer u.mux.Unlock() - u.log.Infof("Updating expected measurements") + u.log.Info("Updating expected measurements") data, err := u.fileHandler.Read(filepath.Join(constants.ServiceBasePath, constants.AttestationConfigFilename)) if err != nil { @@ -79,7 +79,7 @@ func (u *Updatable) Update() error { if err != nil { return fmt.Errorf("unmarshaling config: %w", err) } - u.log.Debugf("New expected measurements: %+v", cfg.GetMeasurements()) + u.log.Debug(fmt.Sprintf("New expected measurements: %+v", cfg.GetMeasurements())) cfgWithCerts, err := u.configWithCerts(cfg) if err != nil { diff --git a/joinservice/internal/watcher/watcher.go b/joinservice/internal/watcher/watcher.go index 07768f6f84..3b0034edcb 100644 --- a/joinservice/internal/watcher/watcher.go +++ b/joinservice/internal/watcher/watcher.go @@ -9,22 +9,21 @@ package watcher import ( "fmt" + "log/slog" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/fsnotify/fsnotify" - "go.uber.org/zap" ) // FileWatcher watches for changes to the file and calls the waiter's Update method. type FileWatcher struct { - log *logger.Logger + log *slog.Logger updater updater watcher eventWatcher done chan struct{} } // New creates a new FileWatcher for the given validator. -func New(log *logger.Logger, updater updater) (*FileWatcher, error) { +func New(log *slog.Logger, updater updater) (*FileWatcher, error) { watcher, err := fsnotify.NewWatcher() if err != nil { return nil, err @@ -59,28 +58,28 @@ func (f *FileWatcher) Watch(file string) error { select { case event, ok := <-f.watcher.Events(): if !ok { - log.Infof("Watcher closed") + log.Info("Watcher closed") return nil } // file changes may be indicated by either a WRITE, CHMOD, CREATE or RENAME event if event.Op&(fsnotify.Write|fsnotify.Chmod|fsnotify.Create|fsnotify.Rename) != 0 { if err := f.updater.Update(); err != nil { - log.With(zap.Error(err)).Errorf("Update failed") + log.With(slog.Any("error", err)).Error("Update failed") } } // if a file gets removed, e.g. 
by a rename event, we need to re-add the file to the watcher
 			if event.Has(fsnotify.Remove) {
 				if err := f.watcher.Add(event.Name); err != nil {
-					log.With(zap.Error(err)).Errorf("Failed to re-add file to watcher")
+					log.With(slog.Any("error", err)).Error("Failed to re-add file to watcher")
 					return fmt.Errorf("failed to re-add file %q to watcher: %w", event.Name, err)
 				}
 			}
 		case err := <-f.watcher.Errors():
 			if err != nil {
-				log.With(zap.Error(err)).Errorf("Watching for measurements updates")
+				log.With(slog.Any("error", err)).Error("Watching for measurements updates")
 				return fmt.Errorf("watching for measurements updates: %w", err)
 			}
 		}
diff --git a/keyservice/cmd/BUILD.bazel b/keyservice/cmd/BUILD.bazel
index fdca4a7249..4a64632f9d 100644
--- a/keyservice/cmd/BUILD.bazel
+++ b/keyservice/cmd/BUILD.bazel
@@ -16,7 +16,6 @@ go_library(
         "//internal/logger",
         "//keyservice/internal/server",
         "@com_github_spf13_afero//:afero",
-        "@org_uber_go_zap//:zap",
     ],
 )
 
diff --git a/keyservice/cmd/main.go b/keyservice/cmd/main.go
index d58464da45..756509a93f 100644
--- a/keyservice/cmd/main.go
+++ b/keyservice/cmd/main.go
@@ -10,6 +10,9 @@ import (
 	"context"
 	"errors"
 	"flag"
+	"fmt"
+	"log/slog"
+	"os"
 	"path/filepath"
 	"strconv"
 	"time"
@@ -22,7 +25,6 @@ import (
 	"github.com/edgelesssys/constellation/v2/internal/logger"
 	"github.com/edgelesssys/constellation/v2/keyservice/internal/server"
 	"github.com/spf13/afero"
-	"go.uber.org/zap"
 )
 
 func main() {
@@ -32,26 +34,30 @@ func main() {
 	verbosity := flag.Int("v", 0, logger.CmdLineVerbosityDescription)
 	flag.Parse()
 
-	log := logger.New(logger.JSONLog, logger.VerbosityFromInt(*verbosity))
+	log := logger.NewJSONLogger(logger.VerbosityFromInt(*verbosity))
 
-	log.With(zap.String("version", constants.BinaryVersion().String())).
-		Infof("Constellation Key Management Service")
+	log.With(slog.String("version", constants.BinaryVersion().String())).
+ Info("Constellation Key Management Service") // read master secret and salt file := file.NewHandler(afero.NewOsFs()) masterKey, err := file.Read(*masterSecretPath) if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to read master secret") + log.With(slog.Any("error", err)).Error("Failed to read master secret") + os.Exit(1) } if len(masterKey) < crypto.MasterSecretLengthMin { - log.With(zap.Error(errors.New("invalid key length"))).Fatalf("Provided master secret is smaller than the required minimum of %d bytes", crypto.MasterSecretLengthMin) + log.With(slog.Any("error", errors.New("invalid key length"))).Error(fmt.Sprintf("Provided master secret is smaller than the required minimum of %d bytes", crypto.MasterSecretLengthMin)) + os.Exit(1) } salt, err := file.Read(*saltPath) if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to read salt") + log.With(slog.Any("error", err)).Error("Failed to read salt") + os.Exit(1) } if len(salt) < crypto.RNGLengthDefault { - log.With(zap.Error(errors.New("invalid salt length"))).Fatalf("Expected salt to be %d bytes, but got %d", crypto.RNGLengthDefault, len(salt)) + log.With(slog.Any("error", errors.New("invalid salt length"))).Error(fmt.Sprintf("Expected salt to be %d bytes, but got %d", crypto.RNGLengthDefault, len(salt))) + os.Exit(1) } masterSecret := uri.MasterSecret{Key: masterKey, Salt: salt} @@ -60,11 +66,13 @@ func main() { defer cancel() conKMS, err := setup.KMS(ctx, uri.NoStoreURI, masterSecret.EncodeToURI()) if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to setup KMS") + log.With(slog.Any("error", err)).Error("Failed to setup KMS") + os.Exit(1) } defer conKMS.Close() - if err := server.New(log.Named("keyService"), conKMS).Run(*port); err != nil { - log.With(zap.Error(err)).Fatalf("Failed to run key-service server") + if err := server.New(log.WithGroup("keyService"), conKMS).Run(*port); err != nil { + log.With(slog.Any("error", err)).Error("Failed to run key-service server") + os.Exit(1) } } diff --git a/keyservice/internal/server/BUILD.bazel b/keyservice/internal/server/BUILD.bazel index 9e20af1943..756c76f6d0 100644 --- a/keyservice/internal/server/BUILD.bazel +++ b/keyservice/internal/server/BUILD.bazel @@ -15,8 +15,6 @@ go_library( "@org_golang_google_grpc//:go_default_library", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//status", - "@org_uber_go_zap//:zap", - "@org_uber_go_zap//zapcore", ], ) diff --git a/keyservice/internal/server/server.go b/keyservice/internal/server/server.go index e6d5dd23d0..b7517bceb3 100644 --- a/keyservice/internal/server/server.go +++ b/keyservice/internal/server/server.go @@ -10,6 +10,7 @@ package server import ( "context" "fmt" + "log/slog" "net" "github.com/edgelesssys/constellation/v2/internal/crypto" @@ -17,8 +18,6 @@ import ( "github.com/edgelesssys/constellation/v2/internal/kms/kms" "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/keyservice/keyserviceproto" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -28,13 +27,13 @@ import ( // The server serves aTLS for cluster external requests // and plain gRPC for cluster internal requests. type Server struct { - log *logger.Logger + log *slog.Logger conKMS kms.CloudKMS keyserviceproto.UnimplementedAPIServer } // New creates a new Server. 
-func New(log *logger.Logger, conKMS kms.CloudKMS) *Server { +func New(log *slog.Logger, conKMS kms.CloudKMS) *Server { return &Server{ log: log, conKMS: conKMS, @@ -49,12 +48,12 @@ func (s *Server) Run(port string) error { return fmt.Errorf("failed to listen on port %s: %v", port, err) } - server := grpc.NewServer(s.log.Named("gRPC").GetServerUnaryInterceptor()) + server := grpc.NewServer(logger.GetServerUnaryInterceptor(s.log.WithGroup("gRPC"))) keyserviceproto.RegisterAPIServer(server, s) - s.log.Named("gRPC").WithIncreasedLevel(zapcore.WarnLevel).ReplaceGRPCLogger() + logger.ReplaceGRPCLogger(slog.New(logger.NewLevelHandler(slog.LevelWarn, s.log.Handler())).WithGroup("gRPC")) // start the server - s.log.Infof("Starting Constellation key management service on %s", listener.Addr().String()) + s.log.Info(fmt.Sprintf("Starting Constellation key management service on %s", listener.Addr().String())) return server.Serve(listener) } @@ -64,19 +63,19 @@ func (s *Server) GetDataKey(ctx context.Context, in *keyserviceproto.GetDataKeyR // Error on 0 key length if in.Length == 0 { - log.Errorf("Requested key length is zero") + log.Error("Requested key length is zero") return nil, status.Error(codes.InvalidArgument, "can't derive key with length zero") } // Error on empty DataKeyId if in.DataKeyId == "" { - log.Errorf("No data key ID specified") + log.Error("No data key ID specified") return nil, status.Error(codes.InvalidArgument, "no data key ID specified") } key, err := s.conKMS.GetDEK(ctx, crypto.DEKPrefix+in.DataKeyId, int(in.Length)) if err != nil { - log.With(zap.Error(err)).Errorf("Failed to get data key") + log.With(slog.Any("error", err)).Error("Failed to get data key") return nil, status.Errorf(codes.Internal, "%v", err) } return &keyserviceproto.GetDataKeyResponse{DataKey: key}, nil diff --git a/measurement-reader/cmd/BUILD.bazel b/measurement-reader/cmd/BUILD.bazel index db81ce3d90..0de7c3822b 100644 --- a/measurement-reader/cmd/BUILD.bazel +++ b/measurement-reader/cmd/BUILD.bazel @@ -13,8 +13,6 @@ go_library( "//measurement-reader/internal/sorted", "//measurement-reader/internal/tdx", "//measurement-reader/internal/tpm", - "@org_uber_go_zap//:zap", - "@org_uber_go_zap//zapcore", ], ) diff --git a/measurement-reader/cmd/main.go b/measurement-reader/cmd/main.go index 7fb96a3b1b..15ce68d2ea 100644 --- a/measurement-reader/cmd/main.go +++ b/measurement-reader/cmd/main.go @@ -8,6 +8,7 @@ package main import ( "fmt" + "log/slog" "os" "github.com/edgelesssys/constellation/v2/internal/attestation/variant" @@ -16,16 +17,15 @@ import ( "github.com/edgelesssys/constellation/v2/measurement-reader/internal/sorted" "github.com/edgelesssys/constellation/v2/measurement-reader/internal/tdx" "github.com/edgelesssys/constellation/v2/measurement-reader/internal/tpm" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" ) func main() { - log := logger.New(logger.JSONLog, zapcore.InfoLevel) + log := logger.NewJSONLogger(slog.LevelInfo) variantString := os.Getenv(constants.AttestationVariant) attestationVariant, err := variant.FromString(variantString) if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to parse attestation variant") + log.With(slog.Any("error", err)).Error("Failed to parse attestation variant") + os.Exit(1) } var m []sorted.Measurement @@ -33,15 +33,18 @@ func main() { case variant.AWSNitroTPM{}, variant.AWSSEVSNP{}, variant.AzureSEVSNP{}, variant.AzureTrustedLaunch{}, variant.GCPSEVES{}, variant.QEMUVTPM{}: m, err = tpm.Measurements() if err != nil { - 
log.With(zap.Error(err)).Fatalf("Failed to read TPM measurements") + log.With(slog.Any("error", err)).Error("Failed to read TPM measurements") + os.Exit(1) } case variant.QEMUTDX{}: m, err = tdx.Measurements() if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to read Intel TDX measurements") + log.With(slog.Any("error", err)).Error("Failed to read Intel TDX measurements") + os.Exit(1) } default: - log.With(zap.String("attestationVariant", variantString)).Fatalf("Unsupported attestation variant") + log.With(slog.String("attestationVariant", variantString)).Error("Unsupported attestation variant") + os.Exit(1) } fmt.Println("Measurements:") diff --git a/s3proxy/cmd/BUILD.bazel b/s3proxy/cmd/BUILD.bazel index a601597b6d..d426ff5bb5 100644 --- a/s3proxy/cmd/BUILD.bazel +++ b/s3proxy/cmd/BUILD.bazel @@ -10,7 +10,6 @@ go_library( deps = [ "//internal/logger", "//s3proxy/internal/router", - "@org_uber_go_zap//:zap", ], ) diff --git a/s3proxy/cmd/main.go b/s3proxy/cmd/main.go index 12a72622d3..b0a017856c 100644 --- a/s3proxy/cmd/main.go +++ b/s3proxy/cmd/main.go @@ -13,12 +13,12 @@ import ( "crypto/tls" "flag" "fmt" + "log/slog" "net" "net/http" "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/s3proxy/internal/router" - "go.uber.org/zap" ) const ( @@ -43,14 +43,14 @@ func main() { // logLevel can be made a public variable so logging level can be changed dynamically. // TODO (derpsteb): enable once we are on go 1.21. // logLevel := new(slog.LevelVar) - // handler := slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: logLevel}) + // handler := slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: logLevel}) // logger := slog.New(handler) // logLevel.Set(flags.logLevel) - logger := logger.New(logger.JSONLog, logger.VerbosityFromInt(flags.logLevel)) + logger := logger.NewJSONLogger(logger.VerbosityFromInt(flags.logLevel)) if flags.forwardMultipartReqs { - logger.Warnf("configured to forward multipart uploads, this may leak data to AWS") + logger.Warn("configured to forward multipart uploads, this may leak data to AWS") } if err := runServer(flags, logger); err != nil { @@ -58,8 +58,8 @@ func main() { } } -func runServer(flags cmdFlags, log *logger.Logger) error { - log.With(zap.String("ip", flags.ip), zap.Int("port", defaultPort), zap.String("region", flags.region)).Infof("listening") +func runServer(flags cmdFlags, log *slog.Logger) error { + log.With(slog.String("ip", flags.ip), slog.Int("port", defaultPort), slog.String("region", flags.region)).Info("listening") router, err := router.New(flags.region, flags.kmsEndpoint, flags.forwardMultipartReqs, log) if err != nil { @@ -90,7 +90,7 @@ func runServer(flags cmdFlags, log *logger.Logger) error { return server.ListenAndServeTLS("", "") } - log.Warnf("TLS is disabled") + log.Warn("TLS is disabled") return server.ListenAndServe() } diff --git a/s3proxy/internal/kms/BUILD.bazel b/s3proxy/internal/kms/BUILD.bazel index e4d4d25b82..bb7bc1cd41 100644 --- a/s3proxy/internal/kms/BUILD.bazel +++ b/s3proxy/internal/kms/BUILD.bazel @@ -7,7 +7,6 @@ go_library( importpath = "github.com/edgelesssys/constellation/v2/s3proxy/internal/kms", visibility = ["//s3proxy:__subpackages__"], deps = [ - "//internal/logger", "//keyservice/keyserviceproto", "@org_golang_google_grpc//:go_default_library", "@org_golang_google_grpc//credentials/insecure", diff --git a/s3proxy/internal/kms/kms.go b/s3proxy/internal/kms/kms.go index 24e53ed5c8..d6c100b20d 100644 --- a/s3proxy/internal/kms/kms.go +++ 
b/s3proxy/internal/kms/kms.go @@ -13,8 +13,8 @@ package kms import ( "context" "fmt" + "log/slog" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/keyservice/keyserviceproto" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" @@ -22,13 +22,13 @@ import ( // Client interacts with Constellation's keyservice. type Client struct { - log *logger.Logger + log *slog.Logger endpoint string grpc grpcClient } // New creates a new KMS. -func New(log *logger.Logger, endpoint string) Client { +func New(log *slog.Logger, endpoint string) Client { return Client{ log: log, endpoint: endpoint, @@ -41,14 +41,14 @@ func (c Client) GetDataKey(ctx context.Context, keyID string, length int) ([]byt log := c.log.With("keyID", keyID, "endpoint", c.endpoint) // the KMS does not use aTLS since traffic is only routed through the Constellation cluster // cluster internal connections are considered trustworthy - log.Infof("Connecting to KMS") + log.Info("Connecting to KMS") conn, err := grpc.DialContext(ctx, c.endpoint, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { return nil, err } defer conn.Close() - log.Infof("Requesting data key") + log.Info("Requesting data key") res, err := c.grpc.GetDataKey( ctx, &keyserviceproto.GetDataKeyRequest{ @@ -61,7 +61,7 @@ func (c Client) GetDataKey(ctx context.Context, keyID string, length int) ([]byt return nil, fmt.Errorf("fetching data encryption key from Constellation KMS: %w", err) } - log.Infof("Data key request successful") + log.Info("Data key request successful") return res.DataKey, nil } diff --git a/s3proxy/internal/router/BUILD.bazel b/s3proxy/internal/router/BUILD.bazel index 8768bc4936..fe9cbad1ff 100644 --- a/s3proxy/internal/router/BUILD.bazel +++ b/s3proxy/internal/router/BUILD.bazel @@ -11,12 +11,10 @@ go_library( importpath = "github.com/edgelesssys/constellation/v2/s3proxy/internal/router", visibility = ["//s3proxy:__subpackages__"], deps = [ - "//internal/logger", "//s3proxy/internal/crypto", "//s3proxy/internal/kms", "//s3proxy/internal/s3", "@com_github_aws_aws_sdk_go_v2_service_s3//:s3", - "@org_uber_go_zap//:zap", ], ) diff --git a/s3proxy/internal/router/handler.go b/s3proxy/internal/router/handler.go index 75cc0fbe21..a85b97a1ae 100644 --- a/s3proxy/internal/router/handler.go +++ b/s3proxy/internal/router/handler.go @@ -10,18 +10,17 @@ import ( "encoding/xml" "fmt" "io" + "log/slog" "net/http" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/s3proxy/internal/s3" - "go.uber.org/zap" ) -func handleGetObject(client *s3.Client, key string, bucket string, log *logger.Logger) http.HandlerFunc { +func handleGetObject(client *s3.Client, key string, bucket string, log *slog.Logger) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { - log.With(zap.String("path", req.URL.Path), zap.String("method", req.Method), zap.String("host", req.Host)).Debugf("intercepting") + log.With(slog.String("path", req.URL.Path), slog.String("method", req.Method), slog.String("host", req.Host)).Debug("intercepting") if req.Header.Get("Range") != "" { - log.Errorf("GetObject Range header unsupported") + log.Error("GetObject Range header unsupported") http.Error(w, "s3proxy currently does not support Range headers", http.StatusNotImplemented) return } @@ -40,12 +39,12 @@ func handleGetObject(client *s3.Client, key string, bucket string, log *logger.L } } -func handlePutObject(client *s3.Client, key string, bucket 
string, log *logger.Logger) http.HandlerFunc { +func handlePutObject(client *s3.Client, key string, bucket string, log *slog.Logger) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { - log.With(zap.String("path", req.URL.Path), zap.String("method", req.Method), zap.String("host", req.Host)).Debugf("intercepting") + log.With(slog.String("path", req.URL.Path), slog.String("method", req.Method), slog.String("host", req.Host)).Debug("intercepting") body, err := io.ReadAll(req.Body) if err != nil { - log.With(zap.Error(err)).Errorf("PutObject") + log.With(slog.Any("error", err)).Error("PutObject") http.Error(w, fmt.Sprintf("reading body: %s", err.Error()), http.StatusInternalServerError) return } @@ -60,12 +59,12 @@ func handlePutObject(client *s3.Client, key string, bucket string, log *logger.L // Thus we have to check incoming requets for matching content digests. // UNSIGNED-PAYLOAD can be used to disabled payload signing. In that case we don't check the content digest. if clientDigest != "" && clientDigest != "UNSIGNED-PAYLOAD" && clientDigest != serverDigest { - log.Debugf("PutObject", "error", "x-amz-content-sha256 mismatch") + log.Debug("PutObject", "error", "x-amz-content-sha256 mismatch") // The S3 API responds with an XML formatted error message. mismatchErr := NewContentSHA256MismatchError(clientDigest, serverDigest) marshalled, err := xml.Marshal(mismatchErr) if err != nil { - log.With(zap.Error(err)).Errorf("PutObject") + log.With(slog.Any("error", err)).Error("PutObject") http.Error(w, fmt.Sprintf("marshalling error: %s", err.Error()), http.StatusInternalServerError) return } @@ -79,14 +78,14 @@ func handlePutObject(client *s3.Client, key string, bucket string, log *logger.L raw := req.Header.Get("x-amz-object-lock-retain-until-date") retentionTime, err := parseRetentionTime(raw) if err != nil { - log.With(zap.String("data", raw), zap.Error(err)).Errorf("parsing lock retention time") + log.With(slog.String("data", raw), slog.Any("error", err)).Error("parsing lock retention time") http.Error(w, fmt.Sprintf("parsing x-amz-object-lock-retain-until-date: %s", err.Error()), http.StatusInternalServerError) return } err = validateContentMD5(req.Header.Get("content-md5"), body) if err != nil { - log.With(zap.Error(err)).Errorf("validating content md5") + log.With(slog.Any("error", err)).Error("validating content md5") http.Error(w, fmt.Sprintf("validating content md5: %s", err.Error()), http.StatusBadRequest) return } @@ -113,16 +112,16 @@ func handlePutObject(client *s3.Client, key string, bucket string, log *logger.L } } -func handleForwards(log *logger.Logger) http.HandlerFunc { +func handleForwards(log *slog.Logger) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { - log.With(zap.String("path", req.URL.Path), zap.String("method", req.Method), zap.String("host", req.Host)).Debugf("forwarding") + log.With(slog.String("path", req.URL.Path), slog.String("method", req.Method), slog.String("host", req.Host)).Debug("forwarding") newReq := repackage(req) httpClient := http.DefaultClient resp, err := httpClient.Do(&newReq) if err != nil { - log.With(zap.Error(err)).Errorf("do request") + log.With(slog.Any("error", err)).Error("do request") http.Error(w, fmt.Sprintf("do request: %s", err.Error()), http.StatusInternalServerError) return } @@ -133,7 +132,7 @@ func handleForwards(log *logger.Logger) http.HandlerFunc { } body, err := io.ReadAll(resp.Body) if err != nil { - log.With(zap.Error(err)).Errorf("ReadAll") + log.With(slog.Any("error", 
err)).Error("ReadAll") http.Error(w, fmt.Sprintf("reading body: %s", err.Error()), http.StatusInternalServerError) return } @@ -143,7 +142,7 @@ func handleForwards(log *logger.Logger) http.HandlerFunc { } if _, err := w.Write(body); err != nil { - log.With(zap.Error(err)).Errorf("Write") + log.With(slog.Any("error", err)).Error("Write") http.Error(w, fmt.Sprintf("writing body: %s", err.Error()), http.StatusInternalServerError) return } @@ -151,41 +150,41 @@ func handleForwards(log *logger.Logger) http.HandlerFunc { } // handleCreateMultipartUpload logs the request and blocks with an error message. -func handleCreateMultipartUpload(log *logger.Logger) http.HandlerFunc { +func handleCreateMultipartUpload(log *slog.Logger) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { - log.With(zap.String("path", req.URL.Path), zap.String("method", req.Method), zap.String("host", req.Host)).Debugf("intercepting CreateMultipartUpload") + log.With(slog.String("path", req.URL.Path), slog.String("method", req.Method), slog.String("host", req.Host)).Debug("intercepting CreateMultipartUpload") - log.Errorf("Blocking CreateMultipartUpload request") + log.Error("Blocking CreateMultipartUpload request") http.Error(w, "s3proxy is configured to block CreateMultipartUpload requests", http.StatusNotImplemented) } } // handleUploadPart logs the request and blocks with an error message. -func handleUploadPart(log *logger.Logger) http.HandlerFunc { +func handleUploadPart(log *slog.Logger) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { - log.With(zap.String("path", req.URL.Path), zap.String("method", req.Method), zap.String("host", req.Host)).Debugf("intercepting UploadPart") + log.With(slog.String("path", req.URL.Path), slog.String("method", req.Method), slog.String("host", req.Host)).Debug("intercepting UploadPart") - log.Errorf("Blocking UploadPart request") + log.Error("Blocking UploadPart request") http.Error(w, "s3proxy is configured to block UploadPart requests", http.StatusNotImplemented) } } // handleCompleteMultipartUpload logs the request and blocks with an error message. -func handleCompleteMultipartUpload(log *logger.Logger) http.HandlerFunc { +func handleCompleteMultipartUpload(log *slog.Logger) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { - log.With(zap.String("path", req.URL.Path), zap.String("method", req.Method), zap.String("host", req.Host)).Debugf("intercepting CompleteMultipartUpload") + log.With(slog.String("path", req.URL.Path), slog.String("method", req.Method), slog.String("host", req.Host)).Debug("intercepting CompleteMultipartUpload") - log.Errorf("Blocking CompleteMultipartUpload request") + log.Error("Blocking CompleteMultipartUpload request") http.Error(w, "s3proxy is configured to block CompleteMultipartUpload requests", http.StatusNotImplemented) } } // handleAbortMultipartUpload logs the request and blocks with an error message. 
-func handleAbortMultipartUpload(log *logger.Logger) http.HandlerFunc { +func handleAbortMultipartUpload(log *slog.Logger) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { - log.With(zap.String("path", req.URL.Path), zap.String("method", req.Method), zap.String("host", req.Host)).Debugf("intercepting AbortMultipartUpload") + log.With(slog.String("path", req.URL.Path), slog.String("method", req.Method), slog.String("host", req.Host)).Debug("intercepting AbortMultipartUpload") - log.Errorf("Blocking AbortMultipartUpload request") + log.Error("Blocking AbortMultipartUpload request") http.Error(w, "s3proxy is configured to block AbortMultipartUpload requests", http.StatusNotImplemented) } } diff --git a/s3proxy/internal/router/object.go b/s3proxy/internal/router/object.go index 0f58a2900d..d7f6779f15 100644 --- a/s3proxy/internal/router/object.go +++ b/s3proxy/internal/router/object.go @@ -10,6 +10,7 @@ import ( "context" "encoding/hex" "io" + "log/slog" "net/http" "net/url" "regexp" @@ -18,9 +19,7 @@ import ( "time" "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/s3proxy/internal/crypto" - "go.uber.org/zap" ) const ( @@ -46,12 +45,12 @@ type object struct { sseCustomerAlgorithm string sseCustomerKey string sseCustomerKeyMD5 string - log *logger.Logger + log *slog.Logger } // get is a http.HandlerFunc that implements the GET method for objects. func (o object) get(w http.ResponseWriter, r *http.Request) { - o.log.With(zap.String("key", o.key), zap.String("host", o.bucket)).Debugf("getObject") + o.log.With(slog.String("key", o.key), slog.String("host", o.bucket)).Debug("getObject") versionID, ok := o.query["versionId"] if !ok { @@ -61,7 +60,7 @@ func (o object) get(w http.ResponseWriter, r *http.Request) { output, err := o.client.GetObject(r.Context(), o.bucket, o.key, versionID[0], o.sseCustomerAlgorithm, o.sseCustomerKey, o.sseCustomerKeyMD5) if err != nil { // log with Info as it might be expected behavior (e.g. object not found). - o.log.With(zap.Error(err)).Errorf("GetObject sending request to S3") + o.log.With(slog.Any("error", err)).Error("GetObject sending request to S3") // We want to forward error codes from the s3 API to clients as much as possible. 
code := parseErrorCode(err) @@ -107,7 +106,7 @@ func (o object) get(w http.ResponseWriter, r *http.Request) { body, err := io.ReadAll(output.Body) if err != nil { - o.log.With(zap.Error(err)).Errorf("GetObject reading S3 response") + o.log.With(slog.Any("error", err)).Error("GetObject reading S3 response") http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -117,14 +116,14 @@ func (o object) get(w http.ResponseWriter, r *http.Request) { if ok { encryptedDEK, err := hex.DecodeString(rawEncryptedDEK) if err != nil { - o.log.Errorf("GetObject decoding DEK", "error", err) + o.log.Error("GetObject decoding DEK", "error", err) http.Error(w, err.Error(), http.StatusInternalServerError) return } plaintext, err = crypto.Decrypt(body, encryptedDEK, o.kek) if err != nil { - o.log.With(zap.Error(err)).Errorf("GetObject decrypting response") + o.log.With(slog.Any("error", err)).Error("GetObject decrypting response") http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -132,7 +131,7 @@ func (o object) get(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) if _, err := w.Write(plaintext); err != nil { - o.log.With(zap.Error(err)).Errorf("GetObject sending response") + o.log.With(slog.Any("error", err)).Error("GetObject sending response") } } @@ -140,7 +139,7 @@ func (o object) get(w http.ResponseWriter, r *http.Request) { func (o object) put(w http.ResponseWriter, r *http.Request) { ciphertext, encryptedDEK, err := crypto.Encrypt(o.data, o.kek) if err != nil { - o.log.With(zap.Error(err)).Errorf("PutObject") + o.log.With(slog.Any("error", err)).Error("PutObject") http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -148,7 +147,7 @@ func (o object) put(w http.ResponseWriter, r *http.Request) { output, err := o.client.PutObject(r.Context(), o.bucket, o.key, o.tags, o.contentType, o.objectLockLegalHoldStatus, o.objectLockMode, o.sseCustomerAlgorithm, o.sseCustomerKey, o.sseCustomerKeyMD5, o.objectLockRetainUntilDate, o.metadata, ciphertext) if err != nil { - o.log.With(zap.Error(err)).Errorf("PutObject sending request to S3") + o.log.With(slog.Any("error", err)).Error("PutObject sending request to S3") // We want to forward error codes from the s3 API to clients whenever possible. code := parseErrorCode(err) @@ -199,7 +198,7 @@ func (o object) put(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) if _, err := w.Write(nil); err != nil { - o.log.With(zap.Error(err)).Errorf("PutObject sending response") + o.log.With(slog.Any("error", err)).Error("PutObject sending response") } } diff --git a/s3proxy/internal/router/router.go b/s3proxy/internal/router/router.go index 1680e44de6..0efa4302f7 100644 --- a/s3proxy/internal/router/router.go +++ b/s3proxy/internal/router/router.go @@ -27,13 +27,13 @@ import ( "encoding/base64" "encoding/xml" "fmt" + "log/slog" "net/http" "net/url" "regexp" "strings" "time" - "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/s3proxy/internal/kms" "github.com/edgelesssys/constellation/v2/s3proxy/internal/s3" ) @@ -57,11 +57,11 @@ type Router struct { // s3proxy does not implement those yet. // Setting forwardMultipartReqs to true will forward those requests to the S3 API, otherwise we block them (secure defaults). forwardMultipartReqs bool - log *logger.Logger + log *slog.Logger } // New creates a new Router. 
-func New(region, endpoint string, forwardMultipartReqs bool, log *logger.Logger) (Router, error) { +func New(region, endpoint string, forwardMultipartReqs bool, log *slog.Logger) (Router, error) { kms := kms.New(log, endpoint) // Get the key encryption key that encrypts all DEKs. diff --git a/terraform-provider-constellation/internal/provider/cluster_resource.go b/terraform-provider-constellation/internal/provider/cluster_resource.go index 785914808d..35c682a44d 100644 --- a/terraform-provider-constellation/internal/provider/cluster_resource.go +++ b/terraform-provider-constellation/internal/provider/cluster_resource.go @@ -1240,15 +1240,21 @@ type tfContextLogger struct { ctx context.Context // bind context to struct to satisfy interface } -func (l *tfContextLogger) Debugf(format string, args ...any) { +// Debug takes a format string and arguments as an input and logs +// them using tflog.Debug. +func (l *tfContextLogger) Debug(format string, args ...any) { tflog.Debug(l.ctx, fmt.Sprintf(format, args...)) } -func (l *tfContextLogger) Infof(format string, args ...any) { +// Info takes a format string and arguments as an input and logs +// them using tflog.Info. +func (l *tfContextLogger) Info(format string, args ...any) { tflog.Info(l.ctx, fmt.Sprintf(format, args...)) } -func (l *tfContextLogger) Warnf(format string, args ...any) { +// Warn takes a format string and arguments as an input and logs +// them using tflog.Warn. +func (l *tfContextLogger) Warn(format string, args ...any) { tflog.Warn(l.ctx, fmt.Sprintf(format, args...)) } diff --git a/upgrade-agent/cmd/BUILD.bazel b/upgrade-agent/cmd/BUILD.bazel index ea7b3e2d3b..3e8224ac7a 100644 --- a/upgrade-agent/cmd/BUILD.bazel +++ b/upgrade-agent/cmd/BUILD.bazel @@ -12,7 +12,6 @@ go_library( "//internal/logger", "//upgrade-agent/internal/server", "@com_github_spf13_afero//:afero", - "@org_uber_go_zap//:zap", ], ) diff --git a/upgrade-agent/cmd/main.go b/upgrade-agent/cmd/main.go index 55df367f6e..2eaabb91dc 100644 --- a/upgrade-agent/cmd/main.go +++ b/upgrade-agent/cmd/main.go @@ -8,13 +8,14 @@ package main import ( "flag" + "log/slog" + "os" "github.com/edgelesssys/constellation/v2/internal/constants" "github.com/edgelesssys/constellation/v2/internal/file" "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/upgrade-agent/internal/server" "github.com/spf13/afero" - "go.uber.org/zap" ) const ( @@ -26,23 +27,24 @@ func main() { verbosity := flag.Int("v", 0, logger.CmdLineVerbosityDescription) flag.Parse() - log := logger.New(logger.JSONLog, logger.VerbosityFromInt(*verbosity)).Named("bootstrapper") - defer log.Sync() + log := logger.NewJSONLogger(logger.VerbosityFromInt(*verbosity)).WithGroup("bootstrapper") if *gRPCDebug { - log.Named("gRPC").ReplaceGRPCLogger() + logger.ReplaceGRPCLogger(log.WithGroup("gRPC")) } else { - log.Named("gRPC").WithIncreasedLevel(zap.WarnLevel).ReplaceGRPCLogger() + logger.ReplaceGRPCLogger(slog.New(logger.NewLevelHandler(slog.LevelWarn, log.Handler())).WithGroup("gRPC")) } handler := file.NewHandler(afero.NewOsFs()) server, err := server.New(log, handler) if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to create update server") + log.With(slog.Any("error", err)).Error("Failed to create update server") + os.Exit(1) } err = server.Run(protocol, constants.UpgradeAgentSocketPath) if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to start update server") + log.With(slog.Any("error", err)).Error("Failed to start update server") + os.Exit(1) } } diff 
--git a/upgrade-agent/internal/server/server.go b/upgrade-agent/internal/server/server.go index 61ddcb6fa9..ea18e3d575 100644 --- a/upgrade-agent/internal/server/server.go +++ b/upgrade-agent/internal/server/server.go @@ -17,6 +17,7 @@ import ( "errors" "fmt" "io/fs" + "log/slog" "net" "os" "os/exec" @@ -39,13 +40,13 @@ var errInvalidKubernetesVersion = errors.New("invalid kubernetes version") type Server struct { file file.Handler grpcServer serveStopper - log *logger.Logger + log *slog.Logger upgradeproto.UnimplementedUpdateServer } // New creates a new upgrade-agent server. -func New(log *logger.Logger, fileHandler file.Handler) (*Server, error) { - log = log.Named("upgradeServer") +func New(log *slog.Logger, fileHandler file.Handler) (*Server, error) { + log = log.WithGroup("upgradeServer") server := &Server{ log: log, @@ -53,7 +54,7 @@ func New(log *logger.Logger, fileHandler file.Handler) (*Server, error) { } grpcServer := grpc.NewServer( - log.Named("gRPC").GetServerUnaryInterceptor(), + logger.GetServerUnaryInterceptor(log.WithGroup("gRPC")), ) upgradeproto.RegisterUpdateServer(grpcServer, server) @@ -85,22 +86,22 @@ func (s *Server) Run(protocol string, sockAddr string) error { return fmt.Errorf("failed to listen: %s", err) } - s.log.Infof("Starting") + s.log.Info("Starting") return grpcServer.Serve(lis) } // Stop stops the upgrade-agent server gracefully. func (s *Server) Stop() { - s.log.Infof("Stopping") + s.log.Info("Stopping") s.grpcServer.GracefulStop() - s.log.Infof("Stopped") + s.log.Info("Stopped") } // ExecuteUpdate installs & verifies the provided kubeadm, then executes `kubeadm upgrade plan` & `kubeadm upgrade apply {wanted_Kubernetes_Version}` to upgrade to the specified version. func (s *Server) ExecuteUpdate(ctx context.Context, updateRequest *upgradeproto.ExecuteUpdateRequest) (*upgradeproto.ExecuteUpdateResponse, error) { - s.log.Infof("Upgrade to Kubernetes version started: %s", updateRequest.WantedKubernetesVersion) + s.log.Info(fmt.Sprintf("Upgrade to Kubernetes version started: %s", updateRequest.WantedKubernetesVersion)) installer := installer.NewOSInstaller() err := prepareUpdate(ctx, installer, updateRequest) @@ -120,7 +121,7 @@ func (s *Server) ExecuteUpdate(ctx context.Context, updateRequest *upgradeproto. 
return nil, status.Errorf(codes.Internal, "unable to execute kubeadm upgrade apply: %s: %s", err, string(out)) } - s.log.Infof("Upgrade to Kubernetes version succeeded: %s", updateRequest.WantedKubernetesVersion) + s.log.Info(fmt.Sprintf("Upgrade to Kubernetes version succeeded: %s", updateRequest.WantedKubernetesVersion)) return &upgradeproto.ExecuteUpdateResponse{}, nil } diff --git a/verify/cmd/BUILD.bazel b/verify/cmd/BUILD.bazel index fe9d856c21..de4c0a65b8 100644 --- a/verify/cmd/BUILD.bazel +++ b/verify/cmd/BUILD.bazel @@ -13,7 +13,6 @@ go_library( "//internal/constants", "//internal/logger", "//verify/server", - "@org_uber_go_zap//:zap", ], ) diff --git a/verify/cmd/main.go b/verify/cmd/main.go index cd131eabd6..58310ff81c 100644 --- a/verify/cmd/main.go +++ b/verify/cmd/main.go @@ -8,7 +8,9 @@ package main import ( "flag" + "log/slog" "net" + "os" "strconv" "github.com/edgelesssys/constellation/v2/internal/attestation/choose" @@ -16,7 +18,6 @@ import ( "github.com/edgelesssys/constellation/v2/internal/constants" "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/verify/server" - "go.uber.org/zap" ) func main() { @@ -24,33 +25,38 @@ func main() { verbosity := flag.Int("v", 0, logger.CmdLineVerbosityDescription) flag.Parse() - log := logger.New(logger.JSONLog, logger.VerbosityFromInt(*verbosity)) + log := logger.NewJSONLogger(logger.VerbosityFromInt(*verbosity)) - log.With(zap.String("version", constants.BinaryVersion().String()), zap.String("attestationVariant", *attestationVariant)). - Infof("Constellation Verification Service") + log.With(slog.String("version", constants.BinaryVersion().String()), slog.String("attestationVariant", *attestationVariant)). + Info("Constellation Verification Service") variant, err := variant.FromString(*attestationVariant) if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to parse attestation variant") + log.With(slog.Any("error", err)).Error("Failed to parse attestation variant") + os.Exit(1) } - issuer, err := choose.Issuer(variant, log.Named("issuer")) + issuer, err := choose.Issuer(variant, log.WithGroup("issuer")) if err != nil { - log.With(zap.Error(err)).Fatalf("Failed to create issuer") + log.With(slog.Any("error", err)).Error("Failed to create issuer") + os.Exit(1) } - server := server.New(log.Named("server"), issuer) + server := server.New(log.WithGroup("server"), issuer) httpListener, err := net.Listen("tcp", net.JoinHostPort("", strconv.Itoa(constants.VerifyServicePortHTTP))) if err != nil { - log.With(zap.Error(err), zap.Int("port", constants.VerifyServicePortHTTP)). - Fatalf("Failed to listen") + log.With(slog.Any("error", err), slog.Int("port", constants.VerifyServicePortHTTP)). + Error("Failed to listen") + os.Exit(1) } grpcListener, err := net.Listen("tcp", net.JoinHostPort("", strconv.Itoa(constants.VerifyServicePortGRPC))) if err != nil { - log.With(zap.Error(err), zap.Int("port", constants.VerifyServicePortGRPC)). - Fatalf("Failed to listen") + log.With(slog.Any("error", err), slog.Int("port", constants.VerifyServicePortGRPC)). 
+ Error("Failed to listen") + os.Exit(1) } if err := server.Run(httpListener, grpcListener); err != nil { - log.With(zap.Error(err)).Fatalf("Failed to run server") + log.With(slog.Any("error", err)).Error("Failed to run server") + os.Exit(1) } } diff --git a/verify/server/BUILD.bazel b/verify/server/BUILD.bazel index 37f614631e..3cd658b83e 100644 --- a/verify/server/BUILD.bazel +++ b/verify/server/BUILD.bazel @@ -15,8 +15,6 @@ go_library( "@org_golang_google_grpc//keepalive", "@org_golang_google_grpc//peer", "@org_golang_google_grpc//status", - "@org_uber_go_zap//:zap", - "@org_uber_go_zap//zapcore", ], ) diff --git a/verify/server/server.go b/verify/server/server.go index 39790b9473..c8d7b2c828 100644 --- a/verify/server/server.go +++ b/verify/server/server.go @@ -12,6 +12,7 @@ import ( "encoding/base64" "encoding/json" "fmt" + "log/slog" "net" "net/http" "sync" @@ -20,8 +21,6 @@ import ( "github.com/edgelesssys/constellation/v2/internal/constants" "github.com/edgelesssys/constellation/v2/internal/logger" "github.com/edgelesssys/constellation/v2/verify/verifyproto" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/keepalive" @@ -37,13 +36,13 @@ type attestation struct { // The server exposes both HTTP and gRPC endpoints // to retrieve attestation statements. type Server struct { - log *logger.Logger + log *slog.Logger issuer AttestationIssuer verifyproto.UnimplementedAPIServer } // New initializes a new verification server. -func New(log *logger.Logger, issuer AttestationIssuer) *Server { +func New(log *slog.Logger, issuer AttestationIssuer) *Server { return &Server{ log: log, issuer: issuer, @@ -57,9 +56,9 @@ func (s *Server) Run(httpListener, grpcListener net.Listener) error { var wg sync.WaitGroup var once sync.Once - s.log.WithIncreasedLevel(zapcore.WarnLevel).Named("grpc").ReplaceGRPCLogger() + logger.ReplaceGRPCLogger(slog.New(logger.NewLevelHandler(slog.LevelWarn, s.log.Handler()).WithGroup("grpc"))) grpcServer := grpc.NewServer( - s.log.Named("gRPC").GetServerUnaryInterceptor(), + logger.GetServerUnaryInterceptor(s.log.WithGroup("gRPC")), grpc.KeepaliveParams(keepalive.ServerParameters{Time: 15 * time.Second}), ) verifyproto.RegisterAPIServer(grpcServer, s) @@ -73,7 +72,7 @@ func (s *Server) Run(httpListener, grpcListener net.Listener) error { defer wg.Done() defer grpcServer.GracefulStop() - s.log.Infof("Starting HTTP server on %s", httpListener.Addr().String()) + s.log.Info(fmt.Sprintf("Starting HTTP server on %s", httpListener.Addr().String())) httpErr := httpServer.Serve(httpListener) if httpErr != nil && httpErr != http.ErrServerClosed { once.Do(func() { err = httpErr }) @@ -85,7 +84,7 @@ func (s *Server) Run(httpListener, grpcListener net.Listener) error { defer wg.Done() defer func() { _ = httpServer.Shutdown(context.Background()) }() - s.log.Infof("Starting gRPC server on %s", grpcListener.Addr().String()) + s.log.Info(fmt.Sprintf("Starting gRPC server on %s", grpcListener.Addr().String())) grpcErr := grpcServer.Serve(grpcListener) if grpcErr != nil { once.Do(func() { err = grpcErr }) @@ -103,49 +102,49 @@ func (s *Server) GetAttestation(ctx context.Context, req *verifyproto.GetAttesta peerAddr = peer.Addr.String() } - log := s.log.With(zap.String("peerAddress", peerAddr)).Named("gRPC") - s.log.Infof("Received attestation request") + log := s.log.With(slog.String("peerAddress", peerAddr)).WithGroup("gRPC") + s.log.Info("Received attestation request") if len(req.Nonce) == 0 { - 
log.Errorf("Received attestation request with empty nonce") + log.Error("Received attestation request with empty nonce") return nil, status.Error(codes.InvalidArgument, "nonce is required to issue attestation") } - log.Infof("Creating attestation") + log.Info("Creating attestation") statement, err := s.issuer.Issue(ctx, []byte(constants.ConstellationVerifyServiceUserData), req.Nonce) if err != nil { return nil, status.Errorf(codes.Internal, "issuing attestation statement: %v", err) } - log.Infof("Attestation request successful") + log.Info("Attestation request successful") return &verifyproto.GetAttestationResponse{Attestation: statement}, nil } // getAttestationHTTP implements the HTTP endpoint for retrieving attestation statements. func (s *Server) getAttestationHTTP(w http.ResponseWriter, r *http.Request) { - log := s.log.With(zap.String("peerAddress", r.RemoteAddr)).Named("http") + log := s.log.With(slog.String("peerAddress", r.RemoteAddr)).WithGroup("http") nonceB64 := r.URL.Query()["nonce"] if len(nonceB64) != 1 || nonceB64[0] == "" { - log.Errorf("Received attestation request with empty or multiple nonce parameter") + log.Error("Received attestation request with empty or multiple nonce parameter") http.Error(w, "nonce parameter is required exactly once", http.StatusBadRequest) return } nonce, err := base64.URLEncoding.DecodeString(nonceB64[0]) if err != nil { - log.With(zap.Error(err)).Errorf("Received attestation request with invalid nonce") + log.With(slog.Any("error", err)).Error("Received attestation request with invalid nonce") http.Error(w, fmt.Sprintf("invalid base64 encoding for nonce: %v", err), http.StatusBadRequest) return } - log.Infof("Creating attestation") + log.Info("Creating attestation") quote, err := s.issuer.Issue(r.Context(), []byte(constants.ConstellationVerifyServiceUserData), nonce) if err != nil { http.Error(w, fmt.Sprintf("issuing attestation statement: %v", err), http.StatusInternalServerError) return } - log.Infof("Attestation request successful") + log.Info("Attestation request successful") w.Header().Set("Content-Type", "application/json") if err := json.NewEncoder(w).Encode(attestation{quote}); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError)