diff --git a/.github/workflows/golangci-lint.yaml b/.github/workflows/golangci-lint.yaml index 5e27731e8b..d5045911d0 100644 --- a/.github/workflows/golangci-lint.yaml +++ b/.github/workflows/golangci-lint.yaml @@ -45,3 +45,13 @@ jobs: - name: Run linter from make target run: | make check + - name: Run log linter + run: | + set +e + + # log messages should never start upper-cased + find . -name '*.go' | grep -v '_test.go' | xargs grep -n "Msg(\"[A-Z]" | grep -v -E "Msg\(\"(HTTP|OpenID|OAuth|TLS)" + if [ $? -eq 0 ]; then + exit 1 + fi + exit 0 diff --git a/errors/errors.go b/errors/errors.go index da77ea84d1..433743528d 100644 --- a/errors/errors.go +++ b/errors/errors.go @@ -89,8 +89,8 @@ var ( ErrImgStoreNotFound = errors.New("routes: image store not found corresponding to given route") ErrEmptyValue = errors.New("cache: empty value") ErrEmptyRepoList = errors.New("search: no repository found") - ErrCVESearchDisabled = errors.New("search: CVE search is disabled") - ErrCVEDBNotFound = errors.New("cve: CVE DB is not present") + ErrCVESearchDisabled = errors.New("search: cve search is disabled") + ErrCVEDBNotFound = errors.New("cve: cve-db is not present") ErrInvalidRepositoryName = errors.New("repository: not a valid repository name") ErrSyncMissingCatalog = errors.New("sync: couldn't fetch upstream registry's catalog") ErrMethodNotSupported = errors.New("storage: method not supported") diff --git a/pkg/api/authn.go b/pkg/api/authn.go index d3252ff396..f3a6e93a45 100644 --- a/pkg/api/authn.go +++ b/pkg/api/authn.go @@ -419,7 +419,7 @@ func bearerAuthHandler(ctlr *Controller) mux.MiddlewareFunc { EmptyDefaultNamespace: true, }) if err != nil { - ctlr.Log.Panic().Err(err).Msg("error creating bearer authorizer") + ctlr.Log.Panic().Err(err).Msg("failed to create bearer authorizer") } return func(next http.Handler) http.Handler { diff --git a/pkg/api/routes.go b/pkg/api/routes.go index 82160ef3f4..13edb61651 100644 --- a/pkg/api/routes.go +++ b/pkg/api/routes.go @@ -549,7 
+549,7 @@ func getReferrers(ctx context.Context, routeHandler *RouteHandler, if errSync := routeHandler.c.SyncOnDemand.SyncReference(ctx, name, digest.String(), syncConstants.OCI); errSync != nil { routeHandler.c.Log.Err(errSync).Str("repository", name).Str("reference", digest.String()). - Msg("error encounter while syncing OCI reference for image") + Msg("failed to sync OCI reference for image") } refs, err = imgStore.GetReferrers(name, digest, artifactTypes) @@ -607,7 +607,7 @@ func (rh *RouteHandler) GetReferrers(response http.ResponseWriter, request *http rh.c.Log.Error().Err(err).Str("name", name).Str("digest", digest.String()).Msg("manifest not found") response.WriteHeader(http.StatusNotFound) } else { - rh.c.Log.Error().Err(err).Str("name", name).Str("digest", digest.String()).Msg("unable to get references") + rh.c.Log.Error().Err(err).Str("name", name).Str("digest", digest.String()).Msg("failed to get references") response.WriteHeader(http.StatusInternalServerError) } @@ -616,7 +616,7 @@ func (rh *RouteHandler) GetReferrers(response http.ResponseWriter, request *http out, err := json.Marshal(referrers) if err != nil { - rh.c.Log.Error().Err(err).Str("name", name).Str("digest", digest.String()).Msg("unable to marshal json") + rh.c.Log.Error().Err(err).Str("name", name).Str("digest", digest.String()).Msg("failed to marshal json") response.WriteHeader(http.StatusInternalServerError) return @@ -1223,8 +1223,6 @@ func (rh *RouteHandler) CreateBlobUpload(response http.ResponseWriter, request * return } - rh.c.Log.Info().Int64("r.ContentLength", request.ContentLength).Msg("DEBUG") - digestStr := digests[0] digest := godigest.Digest(digestStr) @@ -1496,8 +1494,6 @@ func (rh *RouteHandler) UpdateBlobUpload(response http.ResponseWriter, request * return } - rh.c.Log.Info().Int64("r.ContentLength", request.ContentLength).Msg("DEBUG") - contentPresent := true contentLen, err := strconv.ParseInt(request.Header.Get("Content-Length"), 10, 64) @@ -1920,7 +1916,7 @@ func 
getImageManifest(ctx context.Context, routeHandler *RouteHandler, imgStore if errSync := routeHandler.c.SyncOnDemand.SyncImage(ctx, name, reference); errSync != nil { routeHandler.c.Log.Err(errSync).Str("repository", name).Str("reference", reference). - Msg("error encounter while syncing image") + Msg("failed to sync image") } } @@ -1941,7 +1937,7 @@ func getOrasReferrers(ctx context.Context, routeHandler *RouteHandler, if errSync := routeHandler.c.SyncOnDemand.SyncReference(ctx, name, digest.String(), syncConstants.Oras); errSync != nil { routeHandler.c.Log.Error().Err(err).Str("name", name).Str("digest", digest.String()). - Msg("unable to get references") + Msg("failed to get references") } refs, err = imgStore.GetOrasReferrers(name, digest, artifactType) @@ -2011,7 +2007,7 @@ func (rh *RouteHandler) GetOrasReferrers(response http.ResponseWriter, request * rh.c.Log.Error().Err(err).Str("name", name).Str("digest", digest.String()).Msg("manifest not found") response.WriteHeader(http.StatusNotFound) } else { - rh.c.Log.Error().Err(err).Str("name", name).Str("digest", digest.String()).Msg("unable to get references") + rh.c.Log.Error().Err(err).Str("name", name).Str("digest", digest.String()).Msg("failed to get references") response.WriteHeader(http.StatusInternalServerError) } @@ -2041,7 +2037,7 @@ type APIKeyPayload struct { //nolint:revive func (rh *RouteHandler) GetAPIKeys(resp http.ResponseWriter, req *http.Request) { apiKeys, err := rh.c.MetaDB.GetUserAPIKeys(req.Context()) if err != nil { - rh.c.Log.Error().Err(err).Msg("error getting list of API keys for user") + rh.c.Log.Error().Err(err).Msg("failed to get list of api keys for user") resp.WriteHeader(http.StatusInternalServerError) return @@ -2057,7 +2053,7 @@ func (rh *RouteHandler) GetAPIKeys(resp http.ResponseWriter, req *http.Request) data, err := json.Marshal(apiKeyResponse) if err != nil { - rh.c.Log.Error().Err(err).Msg("unable to marshal api key response") + rh.c.Log.Error().Err(err).Msg("failed to 
marshal api key response") resp.WriteHeader(http.StatusInternalServerError) @@ -2085,7 +2081,7 @@ func (rh *RouteHandler) CreateAPIKey(resp http.ResponseWriter, req *http.Request body, err := io.ReadAll(req.Body) if err != nil { - rh.c.Log.Error().Msg("unable to read request body") + rh.c.Log.Error().Msg("failed to read request body") resp.WriteHeader(http.StatusInternalServerError) return @@ -2141,7 +2137,7 @@ func (rh *RouteHandler) CreateAPIKey(resp http.ResponseWriter, req *http.Request err = rh.c.MetaDB.AddUserAPIKey(req.Context(), hashedAPIKey, apiKeyDetails) if err != nil { - rh.c.Log.Error().Err(err).Msg("error storing API key") + rh.c.Log.Error().Err(err).Msg("failed to store api key") resp.WriteHeader(http.StatusInternalServerError) return @@ -2159,7 +2155,7 @@ func (rh *RouteHandler) CreateAPIKey(resp http.ResponseWriter, req *http.Request data, err := json.Marshal(apiKeyResponse) if err != nil { - rh.c.Log.Error().Err(err).Msg("unable to marshal api key response") + rh.c.Log.Error().Err(err).Msg("failed to marshal api key response") resp.WriteHeader(http.StatusInternalServerError) @@ -2194,7 +2190,7 @@ func (rh *RouteHandler) RevokeAPIKey(resp http.ResponseWriter, req *http.Request err := rh.c.MetaDB.DeleteUserAPIKey(req.Context(), keyID) if err != nil { - rh.c.Log.Error().Err(err).Str("keyID", keyID).Msg("error deleting API key") + rh.c.Log.Error().Err(err).Str("keyID", keyID).Msg("failed to delete api key") resp.WriteHeader(http.StatusInternalServerError) return diff --git a/pkg/api/session.go b/pkg/api/session.go index c5f46f2261..35b32682d8 100644 --- a/pkg/api/session.go +++ b/pkg/api/session.go @@ -121,7 +121,8 @@ func SessionLogger(ctlr *Controller) mux.MiddlewareFunc { monitoring.ObserveHTTPMethodLatency(ctlr.Metrics, method, latency) // histogram } - log.Str("clientIP", clientIP). + log.Str("component", "session"). + Str("clientIP", clientIP). Str("method", method). Str("path", path). Int("statusCode", statusCode). 
@@ -172,6 +173,7 @@ func SessionAuditLogger(audit *log.Logger) mux.MiddlewareFunc { method == http.MethodPatch || method == http.MethodDelete) && (statusCode == http.StatusOK || statusCode == http.StatusCreated || statusCode == http.StatusAccepted) { audit.Info(). + Str("component", "session"). Str("clientIP", clientIP). Str("subject", username). Str("action", method). diff --git a/pkg/cli/client/cve_cmd_test.go b/pkg/cli/client/cve_cmd_test.go index 6c46968d9a..1075017c31 100644 --- a/pkg/cli/client/cve_cmd_test.go +++ b/pkg/cli/client/cve_cmd_test.go @@ -181,7 +181,7 @@ func TestNegativeServerResponse(t *testing.T) { test.WaitTillServerReady(url) - _, err = test.ReadLogFileAndSearchString(logPath, "DB update completed, next update scheduled", 90*time.Second) + _, err = test.ReadLogFileAndSearchString(logPath, "cve-db update completed, next update scheduled after interval", 90*time.Second) if err != nil { panic(err) } @@ -263,7 +263,7 @@ func TestServerCVEResponse(t *testing.T) { panic(err) } - _, err = test.ReadLogFileAndSearchString(logPath, "DB update completed, next update scheduled", 90*time.Second) + _, err = test.ReadLogFileAndSearchString(logPath, "cve-db update completed, next update scheduled after interval", 90*time.Second) if err != nil { panic(err) } } diff --git a/pkg/cli/server/config_reloader.go b/pkg/cli/server/config_reloader.go index 2c90499ea1..fc6d9966d1 100644 --- a/pkg/cli/server/config_reloader.go +++ b/pkg/cli/server/config_reloader.go @@ -111,7 +111,7 @@ func (hr *HotReloader) Start() context.Context { }() if err := hr.watcher.Add(hr.filePath); err != nil { - log.Panic().Err(err).Str("config", hr.filePath).Msg("error adding config file to FsNotify watcher") + log.Panic().Err(err).Str("config", hr.filePath).Msg("failed to add config file to fsnotify watcher") } <-done diff --git a/pkg/cli/server/config_reloader_test.go b/pkg/cli/server/config_reloader_test.go index d95e111d4d..b7f260c42f 100644 --- a/pkg/cli/server/config_reloader_test.go 
+++ b/pkg/cli/server/config_reloader_test.go @@ -491,7 +491,7 @@ func TestConfigReloader(t *testing.T) { time.Sleep(5 * time.Second) found, err := test.ReadLogFileAndSearchString(logFile.Name(), - "Error downloading Trivy DB to destination dir", 30*time.Second) + "failed to download trivy-db to destination dir", 30*time.Second) So(err, ShouldBeNil) So(found, ShouldBeTrue) diff --git a/pkg/cli/server/extensions_test.go b/pkg/cli/server/extensions_test.go index 991902a0b5..4c9988206b 100644 --- a/pkg/cli/server/extensions_test.go +++ b/pkg/cli/server/extensions_test.go @@ -805,7 +805,7 @@ func TestServeScrubExtension(t *testing.T) { So(dataStr, ShouldContainSubstring, "\"Extensions\":{\"Search\":null,\"Sync\":null,\"Metrics\":null,\"Scrub\":{\"Enable\":true,\"Interval\":86400000000000},\"Lint\":null") //nolint:lll // gofumpt conflicts with lll So(dataStr, ShouldNotContainSubstring, - "Scrub interval set to too-short interval < 2h, changing scrub duration to 2 hours and continuing.") + "scrub interval set to too-short interval < 2h, changing scrub duration to 2 hours and continuing.") }) Convey("scrub implicitly enabled, but with scrub interval param set", t, func(c C) { @@ -837,7 +837,7 @@ func TestServeScrubExtension(t *testing.T) { dataStr := string(data) So(dataStr, ShouldContainSubstring, "\"Scrub\":{\"Enable\":true,\"Interval\":3600000000000}") So(dataStr, ShouldContainSubstring, - "Scrub interval set to too-short interval < 2h, changing scrub duration to 2 hours and continuing.") + "scrub interval set to too-short interval < 2h, changing scrub duration to 2 hours and continuing.") }) Convey("scrub explicitly enabled, but without scrub interval param set", t, func(c C) { @@ -869,7 +869,7 @@ func TestServeScrubExtension(t *testing.T) { So(dataStr, ShouldContainSubstring, "\"Extensions\":{\"Search\":null,\"Sync\":null,\"Metrics\":null,\"Scrub\":{\"Enable\":true,\"Interval\":86400000000000},\"Lint\":null") //nolint:lll // gofumpt conflicts with lll So(dataStr, 
ShouldNotContainSubstring, - "Scrub interval set to too-short interval < 2h, changing scrub duration to 2 hours and continuing.") + "scrub interval set to too-short interval < 2h, changing scrub duration to 2 hours and continuing.") }) Convey("scrub explicitly disabled", t, func(c C) { @@ -899,9 +899,9 @@ func TestServeScrubExtension(t *testing.T) { defer os.Remove(logPath) // clean up dataStr := string(data) So(dataStr, ShouldContainSubstring, "\"Scrub\":{\"Enable\":false,\"Interval\":86400000000000}") - So(dataStr, ShouldContainSubstring, "Scrub config not provided, skipping scrub") + So(dataStr, ShouldContainSubstring, "scrub config not provided, skipping scrub") So(dataStr, ShouldNotContainSubstring, - "Scrub interval set to too-short interval < 2h, changing scrub duration to 2 hours and continuing.") + "scrub interval set to too-short interval < 2h, changing scrub duration to 2 hours and continuing.") }) } @@ -1066,7 +1066,7 @@ func TestServeSearchEnabledCVE(t *testing.T) { So(found, ShouldBeTrue) So(err, ShouldBeNil) - found, err = ReadLogFileAndSearchString(logPath, "updating the CVE database", readLogFileTimeout) + found, err = ReadLogFileAndSearchString(logPath, "updating cve-db", readLogFileTimeout) So(found, ShouldBeTrue) So(err, ShouldBeNil) }) @@ -1151,8 +1151,8 @@ func TestServeSearchDisabled(t *testing.T) { defer os.Remove(logPath) // clean up dataStr := string(data) So(dataStr, ShouldContainSubstring, - "\"Search\":{\"Enable\":false,\"CVE\":{\"UpdateInterval\":10800000000000,\"Trivy\":null}") - So(dataStr, ShouldContainSubstring, "CVE config not provided, skipping CVE update") + `"Search":{"Enable":false,"CVE":{"UpdateInterval":10800000000000,"Trivy":null}`) + So(dataStr, ShouldContainSubstring, "cve config not provided, skipping cve-db update") So(dataStr, ShouldNotContainSubstring, "CVE update interval set to too-short interval < 2h, changing update duration to 2 hours and continuing.") }) diff --git a/pkg/cli/server/root.go 
b/pkg/cli/server/root.go index 6a0a89ab2e..6ab1534c32 100644 --- a/pkg/cli/server/root.go +++ b/pkg/cli/server/root.go @@ -72,7 +72,9 @@ func newServeCmd(conf *config.Config) *cobra.Command { } if err := ctlr.Run(reloaderCtx); err != nil { - log.Error().Err(err).Msg("unable to start controller, exiting") + log.Error().Err(err).Msg("failed to start controller, exiting") + + return err } return nil @@ -108,7 +110,7 @@ func newScrubCmd(conf *config.Config) *cobra.Command { fmt.Sprintf("http://%s/v2", net.JoinHostPort(conf.HTTP.Address, conf.HTTP.Port)), nil) if err != nil { - log.Error().Err(err).Msg("unable to create a new http request") + log.Error().Err(err).Msg("failed to create a new http request") return err } @@ -116,7 +118,8 @@ func newScrubCmd(conf *config.Config) *cobra.Command { response, err := http.DefaultClient.Do(req) if err == nil { response.Body.Close() - log.Warn().Msg("The server is running, in order to perform the scrub command the server should be shut down") + log.Warn().Err(zerr.ErrServerIsRunning). 
+ Msg("server is running, in order to perform the scrub command the server should be shut down") return zerr.ErrServerIsRunning } else { @@ -153,12 +156,12 @@ func newVerifyCmd(conf *config.Config) *cobra.Command { RunE: func(cmd *cobra.Command, args []string) error { if len(args) > 0 { if err := LoadConfiguration(conf, args[0]); err != nil { - log.Error().Str("config", args[0]).Msg("Config file is invalid") + log.Error().Str("config", args[0]).Msg("config file is invalid") return err } - log.Info().Str("config", args[0]).Msg("Config file is valid") + log.Info().Str("config", args[0]).Msg("config file is valid") } return nil @@ -303,11 +306,11 @@ func validateCacheConfig(cfg *config.Config, log zlog.Logger) error { func validateExtensionsConfig(cfg *config.Config, log zlog.Logger) error { if cfg.Extensions != nil && cfg.Extensions.Mgmt != nil { - log.Warn().Msg("The mgmt extensions configuration option has been made redundant and will be ignored.") + log.Warn().Msg("mgmt extensions configuration option has been made redundant and will be ignored.") } if cfg.Extensions != nil && cfg.Extensions.APIKey != nil { - log.Warn().Msg("The apikey extension configuration will be ignored as API keys " + + log.Warn().Msg("apikey extension configuration will be ignored as API keys " + "are now configurable in the HTTP settings.") } @@ -315,7 +318,7 @@ func validateExtensionsConfig(cfg *config.Config, log zlog.Logger) error { // it would make sense to also check for mgmt and user prefs to be enabled, // but those are both enabled by having the search and ui extensions enabled if cfg.Extensions.Search == nil || !*cfg.Extensions.Search.Enable { - log.Error().Err(zerr.ErrBadConfig).Msg("UI functionality can't be used without search extension.") + log.Error().Err(zerr.ErrBadConfig).Msg("ui functionality can't be used without search extension.") return zerr.ErrBadConfig } @@ -324,7 +327,7 @@ func validateExtensionsConfig(cfg *config.Config, log zlog.Logger) error { //nolint:lll if 
cfg.Storage.StorageDriver != nil && cfg.Extensions != nil && cfg.Extensions.Search != nil && cfg.Extensions.Search.Enable != nil && *cfg.Extensions.Search.Enable && cfg.Extensions.Search.CVE != nil { - log.Error().Err(zerr.ErrBadConfig).Msg("CVE functionality can't be used with remote storage. Please disable CVE") + log.Error().Err(zerr.ErrBadConfig).Msg("cve functionality can't be used with remote storage - please disable cve") return zerr.ErrBadConfig } @@ -333,7 +336,7 @@ func validateExtensionsConfig(cfg *config.Config, log zlog.Logger) error { //nolint:lll if subPath.StorageDriver != nil && cfg.Extensions != nil && cfg.Extensions.Search != nil && cfg.Extensions.Search.Enable != nil && *cfg.Extensions.Search.Enable && cfg.Extensions.Search.CVE != nil { - log.Error().Err(zerr.ErrBadConfig).Msg("CVE functionality can't be used with remote storage. Please disable CVE") + log.Error().Err(zerr.ErrBadConfig).Msg("cve functionality can't be used with remote storage - please disable cve") return zerr.ErrBadConfig } @@ -546,7 +549,7 @@ func applyDefaultValues(config *config.Config, viperInstance *viper.Viper, log z if config.Extensions.Search.CVE.UpdateInterval < defaultUpdateInterval { config.Extensions.Search.CVE.UpdateInterval = defaultUpdateInterval - log.Warn().Msg("CVE update interval set to too-short interval < 2h, " + + log.Warn().Msg("cve update interval set to too-short interval < 2h, " + "changing update duration to 2 hours and continuing.") } @@ -556,15 +559,15 @@ func applyDefaultValues(config *config.Config, viperInstance *viper.Viper, log z if config.Extensions.Search.CVE.Trivy.DBRepository == "" { defaultDBDownloadURL := "ghcr.io/aquasecurity/trivy-db" - log.Info().Str("trivyDownloadURL", defaultDBDownloadURL). - Msg("Config: using default Trivy DB download URL.") + log.Info().Str("url", defaultDBDownloadURL). 
+ Msg("config: using default trivy-db download URL.") config.Extensions.Search.CVE.Trivy.DBRepository = defaultDBDownloadURL } if config.Extensions.Search.CVE.Trivy.JavaDBRepository == "" { defaultJavaDBDownloadURL := "ghcr.io/aquasecurity/trivy-java-db" - log.Info().Str("trivyJavaDownloadURL", defaultJavaDBDownloadURL). - Msg("Config: using default Trivy Java DB download URL.") + log.Info().Str("url", defaultJavaDBDownloadURL). + Msg("config: using default trivy-java-db download URL.") config.Extensions.Search.CVE.Trivy.JavaDBRepository = defaultJavaDBDownloadURL } } @@ -640,7 +643,7 @@ func applyDefaultValues(config *config.Config, viperInstance *viper.Viper, log z cachePath := path.Join(cacheDir, storageConstants.BoltdbName+storageConstants.DBExtensionName) if _, err := os.Stat(cachePath); err == nil { - log.Info().Msg("Config: dedupe set to false for s3 driver but used to be true.") + log.Info().Msg("config: dedupe set to false for s3 driver but used to be true.") log.Info().Str("cache path", cachePath).Msg("found cache database") config.Storage.RemoteCache = false @@ -661,7 +664,7 @@ func applyDefaultValues(config *config.Config, viperInstance *viper.Viper, log z subpathCachePath := path.Join(subpathCacheDir, storageConstants.BoltdbName+storageConstants.DBExtensionName) if _, err := os.Stat(subpathCachePath); err == nil { - log.Info().Msg("Config: dedupe set to false for s3 driver but used to be true. ") + log.Info().Msg("config: dedupe set to false for s3 driver but used to be true. 
") log.Info().Str("cache path", subpathCachePath).Msg("found cache database") storageConfig.RemoteCache = false @@ -723,14 +726,14 @@ func LoadConfiguration(config *config.Config, configPath string) error { viperInstance.SetConfigFile(configPath) if err := viperInstance.ReadInConfig(); err != nil { - log.Error().Err(err).Msg("error while reading configuration") + log.Error().Err(err).Msg("failed to read configuration") return err } metaData := &mapstructure.Metadata{} if err := viperInstance.Unmarshal(&config, metadataConfig(metaData)); err != nil { - log.Error().Err(err).Msg("error while unmarshaling new config") + log.Error().Err(err).Msg("failed to unmarshal new config") return err } diff --git a/pkg/exporter/api/exporter.go b/pkg/exporter/api/exporter.go index 59a8991e4d..16dae68c6b 100644 --- a/pkg/exporter/api/exporter.go +++ b/pkg/exporter/api/exporter.go @@ -101,7 +101,7 @@ func (zc Collector) Collect(ch chan<- prometheus.Metric) { func panicOnDuplicateMetricName(m map[string]*prometheus.Desc, name string, log log.Logger) { if _, present := m[name]; present { - log.Fatal().Msg("Duplicate keys: metric " + name + " already present") + log.Fatal().Str("metric", name).Msg("duplicate key found") } } @@ -180,16 +180,16 @@ func runExporter(c *Controller) { err := prometheus.Register(GetCollector(c)) if err != nil { - c.Log.Error().Err(err).Msg("Expected error in testing") + c.Log.Debug().Err(err).Msg("ignoring error") } http.Handle(c.Config.Exporter.Metrics.Path, promhttp.Handler()) - c.Log.Info().Str("exporter addr", exporterAddr). - Str("exporter metrics path", c.Config.Exporter.Metrics.Path). - Msg("Exporter is listening on exporter addr & exposes metrics on exporter metrics path") + c.Log.Info().Str("addr", exporterAddr). + Str("path", c.Config.Exporter.Metrics.Path). 
+ Msg("exporter listening") serverAddr := fmt.Sprintf("%s://%s:%s", c.Config.Server.Protocol, c.Config.Server.Host, c.Config.Server.Port) - c.Log.Info().Str("serverAddr", serverAddr).Msg("Scraping metrics") - c.Log.Fatal().Err(server.ListenAndServe()).Msg("Exporter stopped") + c.Log.Info().Str("serverAddr", serverAddr).Msg("scraping metrics") + c.Log.Fatal().Err(server.ListenAndServe()).Msg("exporter stopped") } diff --git a/pkg/exporter/cli/cli.go b/pkg/exporter/cli/cli.go index 8bf9e59364..c5f05e99ed 100644 --- a/pkg/exporter/cli/cli.go +++ b/pkg/exporter/cli/cli.go @@ -60,15 +60,20 @@ func loadConfiguration(config *api.Config, configPath string) { viper.SetConfigFile(configPath) if err := viper.ReadInConfig(); err != nil { - log.Panic().Err(err).Msg("Error while reading configuration") + log.Panic().Err(err).Str("config", configPath).Msg("failed to read configuration") } metaData := &mapstructure.Metadata{} if err := viper.Unmarshal(&config, metadataConfig(metaData)); err != nil { - log.Panic().Err(err).Msg("Error while unmarshaling new config") + log.Panic().Err(err).Str("config", configPath).Msg("failed to unmarshal config") } - if len(metaData.Keys) == 0 || len(metaData.Unused) > 0 { - log.Panic().Err(zerr.ErrBadConfig).Msg("Bad configuration, retry writing it") + if len(metaData.Keys) == 0 { + log.Panic().Err(zerr.ErrBadConfig).Str("config", configPath).Msg("bad configuration") + } + + if len(metaData.Unused) > 0 { + log.Panic().Err(zerr.ErrBadConfig).Interface("unknown fields", metaData.Unused). 
+ Str("config", configPath).Msg("bad configuration") } } diff --git a/pkg/extensions/extension_metrics.go b/pkg/extensions/extension_metrics.go index 1837d7ef78..fe0a1325e4 100644 --- a/pkg/extensions/extension_metrics.go +++ b/pkg/extensions/extension_metrics.go @@ -18,10 +18,10 @@ func EnableMetricsExtension(config *config.Config, log log.Logger, rootDir strin if config.Extensions.Metrics.Prometheus.Path == "" { config.Extensions.Metrics.Prometheus.Path = "/metrics" - log.Warn().Msg("Prometheus instrumentation Path not set, changing to '/metrics'.") + log.Warn().Msg("prometheus instrumentation path not set, changing to '/metrics'.") } } else { - log.Info().Msg("Metrics config not provided, skipping Metrics config update") + log.Info().Msg("metrics config not provided, skipping metrics config update") } } diff --git a/pkg/extensions/extension_scrub.go b/pkg/extensions/extension_scrub.go index bc61ad1fa1..045d725db1 100644 --- a/pkg/extensions/extension_scrub.go +++ b/pkg/extensions/extension_scrub.go @@ -25,7 +25,7 @@ func EnableScrubExtension(config *config.Config, log log.Logger, storeController if config.Extensions.Scrub.Interval < minScrubInterval { config.Extensions.Scrub.Interval = minScrubInterval - log.Warn().Msg("Scrub interval set to too-short interval < 2h, changing scrub duration to 2 hours and continuing.") //nolint:lll // gofumpt conflicts with lll + log.Warn().Msg("scrub interval set to too-short interval < 2h, changing scrub duration to 2 hours and continuing.") //nolint:lll // gofumpt conflicts with lll } generator := &taskGenerator{ @@ -44,7 +44,7 @@ func EnableScrubExtension(config *config.Config, log log.Logger, storeController } } } else { - log.Info().Msg("Scrub config not provided, skipping scrub") + log.Info().Msg("scrub config not provided, skipping scrub") } } diff --git a/pkg/extensions/extension_search.go b/pkg/extensions/extension_search.go index ecfa04612e..a1a04a13dc 100644 --- a/pkg/extensions/extension_search.go +++ 
b/pkg/extensions/extension_search.go @@ -52,14 +52,14 @@ func EnableSearchExtension(conf *config.Config, storeController storage.StoreCon downloadTrivyDB(updateInterval, taskScheduler, cveScanner, log) startScanner(scanInterval, metaDB, taskScheduler, cveScanner, log) } else { - log.Info().Msg("CVE config not provided, skipping CVE update") + log.Info().Msg("cve config not provided, skipping cve-db update") } } func downloadTrivyDB(interval time.Duration, sch *scheduler.Scheduler, cveScanner CveScanner, log log.Logger) { generator := cveinfo.NewDBUpdateTaskGenerator(interval, cveScanner, log) - log.Info().Msg("Submitting CVE DB update scheduler") + log.Info().Msg("submitting cve-db update generator to scheduler") sch.SubmitGenerator(generator, interval, scheduler.HighPriority) } @@ -68,7 +68,7 @@ func startScanner(interval time.Duration, metaDB mTypes.MetaDB, sch *scheduler.S ) { generator := cveinfo.NewScanTaskGenerator(metaDB, cveScanner, log) - log.Info().Msg("Submitting CVE scan scheduler") + log.Info().Msg("submitting cve-scan generator to scheduler") sch.SubmitGenerator(generator, interval, scheduler.MediumPriority) } diff --git a/pkg/extensions/extension_sync.go b/pkg/extensions/extension_sync.go index 852112dc0d..a7f141e4a1 100644 --- a/pkg/extensions/extension_sync.go +++ b/pkg/extensions/extension_sync.go @@ -64,7 +64,7 @@ func EnableSyncExtension(config *config.Config, metaDB mTypes.MetaDB, return onDemand, nil } - log.Info().Msg("Sync registries config not provided or disabled, skipping sync") + log.Info().Msg("sync config not provided or disabled, so not enabling sync") return nil, nil //nolint: nilnil } diff --git a/pkg/extensions/extensions_test.go b/pkg/extensions/extensions_test.go index 593bccfc2d..4f3f8b21d3 100644 --- a/pkg/extensions/extensions_test.go +++ b/pkg/extensions/extensions_test.go @@ -65,7 +65,7 @@ func TestEnableExtension(t *testing.T) { data, err := os.ReadFile(logFile.Name()) So(err, ShouldBeNil) So(string(data), 
ShouldContainSubstring, - "Sync registries config not provided or disabled, skipping sync") + "sync config not provided or disabled, so not enabling sync") }) } @@ -106,7 +106,7 @@ func TestMetricsExtension(t *testing.T) { data, _ := os.ReadFile(logFile.Name()) So(string(data), ShouldContainSubstring, - "Prometheus instrumentation Path not set, changing to '/metrics'.") + "prometheus instrumentation path not set, changing to '/metrics'.") }) } diff --git a/pkg/extensions/imagetrust/image_trust.go b/pkg/extensions/imagetrust/image_trust.go index 3fd06fa141..abb91dafb7 100644 --- a/pkg/extensions/imagetrust/image_trust.go +++ b/pkg/extensions/imagetrust/image_trust.go @@ -259,7 +259,7 @@ func (validityT *validityTask) DoWork(ctx context.Context) error { if len(sigs[zcommon.CosignSignature]) != 0 || len(sigs[zcommon.NotationSignature]) != 0 { err := validityT.metaDB.UpdateSignaturesValidity(validityT.repo.Name, godigest.Digest(signedManifest)) if err != nil { - validityT.log.Info().Msg("error while verifying signatures") + validityT.log.Info().Msg("failed to verify signatures") return err } diff --git a/pkg/extensions/monitoring/minimal.go b/pkg/extensions/monitoring/minimal.go index bd38b8c131..9bb373faec 100644 --- a/pkg/extensions/monitoring/minimal.go +++ b/pkg/extensions/monitoring/minimal.go @@ -314,7 +314,7 @@ func (ms *metricServer) CounterInc(cv *CounterValue) { if err != nil { // The last thing we want is to panic/stop the server due to instrumentation // thus log a message (should be detected during development of new metrics) - ms.log.Error().Err(err).Msg("Instrumentation error") + ms.log.Error().Err(err).Msg("failed due to instrumentation error") return } @@ -334,7 +334,7 @@ func (ms *metricServer) GaugeSet(gv *GaugeValue) { err := sanityChecks(gv.Name, labels, ok, gv.LabelNames, gv.LabelValues) if err != nil { - ms.log.Error().Err(err).Msg("Instrumentation error") + ms.log.Error().Err(err).Msg("failed due to instrumentation error") return } @@ -353,7 
+353,7 @@ func (ms *metricServer) SummaryObserve(sv *SummaryValue) { err := sanityChecks(sv.Name, labels, ok, sv.LabelNames, sv.LabelValues) if err != nil { - ms.log.Error().Err(err).Msg("Instrumentation error") + ms.log.Error().Err(err).Msg("failed due to instrumentation error") return } @@ -374,7 +374,7 @@ func (ms *metricServer) HistogramObserve(hv *HistogramValue) { err := sanityChecks(hv.Name, labels, ok, hv.LabelNames, hv.LabelValues) if err != nil { - ms.log.Error().Err(err).Msg("Instrumentation error") + ms.log.Error().Err(err).Msg("failed due to instrumentation error") return } diff --git a/pkg/extensions/scrub/scrub_test.go b/pkg/extensions/scrub/scrub_test.go index 34db265eef..892652e529 100644 --- a/pkg/extensions/scrub/scrub_test.go +++ b/pkg/extensions/scrub/scrub_test.go @@ -176,7 +176,7 @@ func TestScrubExtension(t *testing.T) { data, err := os.ReadFile(logFile.Name()) So(err, ShouldBeNil) - So(string(data), ShouldContainSubstring, "error while executing generator") + So(string(data), ShouldContainSubstring, "failed to execute generator") So(os.Chmod(path.Join(dir, repoName), 0o755), ShouldBeNil) }) diff --git a/pkg/extensions/search/cve/cve.go b/pkg/extensions/search/cve/cve.go index a4acb8b459..6ac9d2dcae 100644 --- a/pkg/extensions/search/cve/cve.go +++ b/pkg/extensions/search/cve/cve.go @@ -200,10 +200,10 @@ func (cveinfo BaseCveInfo) GetImageListWithCVEFixed(repo, cveID string) ([]cvemo if len(vulnerableTags) != 0 { cveinfo.Log.Info().Str("repository", repo).Str("cve-id", cveID). - Interface("vulnerableTags", vulnerableTags).Msg("Vulnerable tags") + Interface("tags", vulnerableTags).Msg("vulnerable tags") fixedTags = GetFixedTags(allTags, vulnerableTags) cveinfo.Log.Info().Str("repository", repo).Str("cve-id", cveID). - Interface("fixedTags", fixedTags).Msg("Fixed tags") + Interface("tags", fixedTags).Msg("fixed tags") } else { cveinfo.Log.Info().Str("repository", repo).Str("cve-id", cveID). 
Msg("image does not contain any tag that have given cve") diff --git a/pkg/extensions/search/cve/cve_test.go b/pkg/extensions/search/cve/cve_test.go index 46fbba0dc3..ab5f46c4a2 100644 --- a/pkg/extensions/search/cve/cve_test.go +++ b/pkg/extensions/search/cve/cve_test.go @@ -462,23 +462,23 @@ func TestCVESearchDisabled(t *testing.T) { ctrlManager.StartAndWait(port) // Wait for trivy db to download - found, err := test.ReadLogFileAndSearchString(logPath, "CVE config not provided, skipping CVE update", 90*time.Second) + found, err := test.ReadLogFileAndSearchString(logPath, "cve config not provided, skipping cve-db update", 90*time.Second) So(err, ShouldBeNil) So(found, ShouldBeTrue) defer ctrlManager.StopServer() resp, _ := resty.R().SetBasicAuth(username, password).Get(baseURL + constants.FullSearchPrefix + "?query={CVEListForImage(image:\"zot-test\"){Tag%20CVEList{Id%20Description%20Severity%20PackageList{Name%20InstalledVersion%20FixedVersion}}}}") - So(string(resp.Body()), ShouldContainSubstring, "search: CVE search is disabled") + So(string(resp.Body()), ShouldContainSubstring, "search: cve search is disabled") So(resp.StatusCode(), ShouldEqual, 200) resp, _ = resty.R().SetBasicAuth(username, password).Get(baseURL + constants.FullSearchPrefix + "?query={ImageListForCVE(id:\"CVE-201-20482\"){Results{RepoName%20Tag}}}") - So(string(resp.Body()), ShouldContainSubstring, "search: CVE search is disabled") + So(string(resp.Body()), ShouldContainSubstring, "search: cve search is disabled") So(resp.StatusCode(), ShouldEqual, 200) resp, _ = resty.R().SetBasicAuth(username, password).Get(baseURL + constants.FullSearchPrefix + "?query={ImageListWithCVEFixed(id:\"" + "randomId" + "\",image:\"zot-test\"){Results{RepoName%20LastUpdated}}}") So(resp, ShouldNotBeNil) - So(string(resp.Body()), ShouldContainSubstring, "search: CVE search is disabled") + So(string(resp.Body()), ShouldContainSubstring, "search: cve search is disabled") So(resp.StatusCode(), ShouldEqual, 200) }) } 
@@ -542,7 +542,7 @@ func TestCVESearch(t *testing.T) { // trivy db download fail err = os.Mkdir(dbDir+"/_trivy", 0o000) So(err, ShouldBeNil) - found, err := test.ReadLogFileAndSearchString(logPath, "Error downloading Trivy DB to destination dir", 180*time.Second) + found, err := test.ReadLogFileAndSearchString(logPath, "failed to download trivy-db to destination dir", 180*time.Second) So(err, ShouldBeNil) So(found, ShouldBeTrue) @@ -550,7 +550,7 @@ func TestCVESearch(t *testing.T) { So(err, ShouldBeNil) // Wait for trivy db to download - found, err = test.ReadLogFileAndSearchString(logPath, "DB update completed, next update scheduled", 180*time.Second) + found, err = test.ReadLogFileAndSearchString(logPath, "cve-db update completed, next update scheduled after interval", 180*time.Second) So(err, ShouldBeNil) So(found, ShouldBeTrue) @@ -1539,7 +1539,7 @@ func TestFixedTagsWithIndex(t *testing.T) { So(err, ShouldBeNil) // Wait for trivy db to download - found, err := test.ReadLogFileAndSearchString(logPath, "DB update completed, next update scheduled", 180*time.Second) + found, err := test.ReadLogFileAndSearchString(logPath, "cve-db update completed, next update scheduled after interval", 180*time.Second) So(err, ShouldBeNil) So(found, ShouldBeTrue) diff --git a/pkg/extensions/search/cve/scan.go b/pkg/extensions/search/cve/scan.go index 3f3e1980d2..20aa2d5c46 100644 --- a/pkg/extensions/search/cve/scan.go +++ b/pkg/extensions/search/cve/scan.go @@ -13,10 +13,12 @@ import ( func NewScanTaskGenerator( metaDB mTypes.MetaDB, scanner Scanner, - log log.Logger, + logC log.Logger, ) scheduler.TaskGenerator { + sublogger := logC.With().Str("component", "cve").Logger() + return &scanTaskGenerator{ - log: log, + log: log.Logger{Logger: sublogger}, metaDB: metaDB, scanner: scanner, lock: &sync.Mutex{}, @@ -125,13 +127,13 @@ func (gen *scanTaskGenerator) Next() (scheduler.Task, error) { if err != nil { // Do not crash the generator for potential metadb inconsistencies // as 
there may be scannable images not yet scanned - gen.log.Warn().Err(err).Msg("Scheduled CVE scan: error while obtaining repo metadata") + gen.log.Warn().Err(err).Msg("failed to obtain repo metadata during scheduled cve scan") } // no imageMeta are returned, all results are in already in cache // or manifests cannot be scanned if len(imageMeta) == 0 { - gen.log.Info().Msg("Scheduled CVE scan: finished for available images") + gen.log.Info().Msg("finished scanning available images during scheduled cve scan") gen.done = true @@ -184,13 +186,13 @@ func (st *scanTask) DoWork(ctx context.Context) error { // We cache the results internally in the scanner // so we can discard the actual results for now if _, err := st.generator.scanner.ScanImage(image); err != nil { - st.generator.log.Error().Err(err).Str("image", image).Msg("Scheduled CVE scan errored for image") + st.generator.log.Error().Err(err).Str("image", image).Msg("scheduled cve scan failed for image") st.generator.addError(st.digest, err) return err } - st.generator.log.Debug().Str("image", image).Msg("Scheduled CVE scan completed successfully for image") + st.generator.log.Debug().Str("image", image).Msg("scheduled cve scan completed successfully for image") return nil } diff --git a/pkg/extensions/search/cve/scan_test.go b/pkg/extensions/search/cve/scan_test.go index e6078a5206..afc496ad12 100644 --- a/pkg/extensions/search/cve/scan_test.go +++ b/pkg/extensions/search/cve/scan_test.go @@ -439,7 +439,7 @@ func TestScanGeneratorWithMockedData(t *testing.T) { //nolint: gocyclo // Make sure the scanner generator has completed despite errors found, err := test.ReadLogFileAndSearchString(logPath, - "Scheduled CVE scan: finished for available images", 40*time.Second) + "finished scanning available images during scheduled cve scan", 40*time.Second) So(err, ShouldBeNil) So(found, ShouldBeTrue) @@ -461,19 +461,19 @@ func TestScanGeneratorWithMockedData(t *testing.T) { //nolint: gocyclo } found, err = 
test.ReadLogFileAndSearchString(logPath, - "Scheduled CVE scan: error while obtaining repo metadata", 20*time.Second) + "failed to obtain repo metadata during scheduled cve scan", 20*time.Second) So(err, ShouldBeNil) So(found, ShouldBeTrue) // Make sure the scanner generator is catching the scanning error for repo7 found, err = test.ReadLogFileAndSearchString(logPath, - "Scheduled CVE scan errored for image", 20*time.Second) + "scheduled cve scan failed for image", 20*time.Second) So(err, ShouldBeNil) So(found, ShouldBeTrue) // Make sure the scanner generator is triggered at least twice found, err = test.ReadLogFileAndCountStringOccurence(logPath, - "Scheduled CVE scan: finished for available images", 30*time.Second, 2) + "finished scanning available images during scheduled cve scan", 30*time.Second, 2) So(err, ShouldBeNil) So(found, ShouldBeTrue) }) @@ -535,7 +535,7 @@ func TestScanGeneratorWithRealData(t *testing.T) { // Make sure the scanner generator has completed found, err := test.ReadLogFileAndSearchString(logPath, - "Scheduled CVE scan: finished for available images", 120*time.Second) + "finished scanning available images during scheduled cve scan", 120*time.Second) So(err, ShouldBeNil) So(found, ShouldBeTrue) @@ -545,7 +545,7 @@ func TestScanGeneratorWithRealData(t *testing.T) { So(found, ShouldBeTrue) found, err = test.ReadLogFileAndSearchString(logPath, - "Scheduled CVE scan completed successfully for image", 120*time.Second) + "scheduled cve scan completed successfully for image", 120*time.Second) So(err, ShouldBeNil) So(found, ShouldBeTrue) diff --git a/pkg/extensions/search/cve/trivy/scanner.go b/pkg/extensions/search/cve/trivy/scanner.go index 38c6f1e9b9..cc192877d5 100644 --- a/pkg/extensions/search/cve/trivy/scanner.go +++ b/pkg/extensions/search/cve/trivy/scanner.go @@ -357,7 +357,7 @@ func (scanner Scanner) ScanImage(image string) (map[string]cvemodel.CVE, error) } if err != nil { - scanner.log.Error().Err(err).Str("image", 
originalImageInput).Msg("unable to scan image") + scanner.log.Error().Err(err).Str("image", originalImageInput).Msg("failed to scan image") return map[string]cvemodel.CVE{}, err } @@ -509,18 +509,16 @@ func (scanner Scanner) UpdateDB() error { } func (scanner Scanner) updateDB(dbDir string) error { - scanner.log.Debug().Str("dbDir", dbDir).Msg("Download Trivy DB to destination dir") - ctx := context.Background() registryOpts := fanalTypes.RegistryOptions{Insecure: false} - scanner.log.Debug().Str("dbDir", dbDir).Msg("Started downloading Trivy DB to destination dir") + scanner.log.Debug().Str("dbDir", dbDir).Msg("started downloading trivy-db to destination dir") err := operation.DownloadDB(ctx, "dev", dbDir, scanner.dbRepository, false, false, registryOpts) if err != nil { scanner.log.Error().Err(err).Str("dbDir", dbDir). - Str("dbRepository", scanner.dbRepository).Msg("Error downloading Trivy DB to destination dir") + Str("dbRepository", scanner.dbRepository).Msg("failed to download trivy-db to destination dir") return err } @@ -530,13 +528,13 @@ func (scanner Scanner) updateDB(dbDir string) error { if err := javadb.Update(); err != nil { scanner.log.Error().Err(err).Str("dbDir", dbDir). 
- Str("javaDBRepository", scanner.javaDBRepository).Msg("Error downloading Trivy Java DB to destination dir") + Str("javaDBRepository", scanner.javaDBRepository).Msg("failed to download trivy-java-db to destination dir") return err } } - scanner.log.Debug().Str("dbDir", dbDir).Msg("Finished downloading Trivy DB to destination dir") + scanner.log.Debug().Str("dbDir", dbDir).Msg("finished downloading trivy-db to destination dir") return nil } diff --git a/pkg/extensions/search/cve/update.go b/pkg/extensions/search/cve/update.go index 871bb97436..1c095fab28 100644 --- a/pkg/extensions/search/cve/update.go +++ b/pkg/extensions/search/cve/update.go @@ -92,7 +92,7 @@ func newDBUpdadeTask(interval time.Duration, scanner Scanner, } func (dbt *dbUpdateTask) DoWork(ctx context.Context) error { - dbt.log.Info().Msg("updating the CVE database") + dbt.log.Info().Msg("updating cve-db") err := dbt.scanner.UpdateDB() if err != nil { @@ -114,7 +114,8 @@ func (dbt *dbUpdateTask) DoWork(ctx context.Context) error { dbt.generator.lastTaskTime = time.Now() dbt.generator.status = done dbt.generator.lock.Unlock() - dbt.log.Info().Str("DB update completed, next update scheduled after", dbt.interval.String()).Msg("") + + dbt.log.Info().Interface("interval", dbt.interval).Msg("cve-db update completed, next update scheduled after interval") return nil } diff --git a/pkg/extensions/search/cve/update_test.go b/pkg/extensions/search/cve/update_test.go index 8cdf9554a1..9f749d7fc6 100644 --- a/pkg/extensions/search/cve/update_test.go +++ b/pkg/extensions/search/cve/update_test.go @@ -69,7 +69,7 @@ func TestCVEDBGenerator(t *testing.T) { // Wait for trivy db to download found, err := test.ReadLogFileAndCountStringOccurence(logPath, - "DB update completed, next update scheduled", 140*time.Second, 2) + "cve-db update completed, next update scheduled after interval", 140*time.Second, 2) So(err, ShouldBeNil) So(found, ShouldBeTrue) }) diff --git a/pkg/extensions/search/resolver.go 
b/pkg/extensions/search/resolver.go index a6949ac814..a8ca67726d 100644 --- a/pkg/extensions/search/resolver.go +++ b/pkg/extensions/search/resolver.go @@ -332,12 +332,12 @@ func getImageListForCVE( for _, repoMeta := range reposMeta { repo := repoMeta.Name - log.Info().Str("repository", repo).Str("CVE", cveID).Msg("extracting list of tags affected by CVE") + log.Info().Str("repository", repo).Str("CVE", cveID).Msg("extracting list of tags affected by this cve") tagsInfo, err := cveInfo.GetImageListForCVE(repo, cveID) if err != nil { log.Error().Str("repository", repo).Str("CVE", cveID).Err(err). - Msg("error getting image list for CVE from repo") + Msg("failed to get image list for this cve from repository") return &gql_generated.PaginatedImagesResult{}, err } @@ -405,12 +405,12 @@ func getImageListWithCVEFixed( ) (*gql_generated.PaginatedImagesResult, error) { imageList := make([]*gql_generated.ImageSummary, 0) - log.Info().Str("repository", repo).Str("CVE", cveID).Msg("extracting list of tags where CVE is fixed") + log.Info().Str("repository", repo).Str("CVE", cveID).Msg("extracting list of tags where this cve is fixed") tagsInfo, err := cveInfo.GetImageListWithCVEFixed(repo, cveID) if err != nil { log.Error().Str("repository", repo).Str("CVE", cveID).Err(err). 
- Msg("error getting image list with CVE fixed from repo") + Msg("failed to get image list with this cve fixed from repository") return &gql_generated.PaginatedImagesResult{ Page: &gql_generated.PageInfo{}, @@ -1038,7 +1038,7 @@ func expandedRepoInfo(ctx context.Context, repo string, metaDB mTypes.MetaDB, cv repoMeta, err := metaDB.GetRepoMeta(ctx, repo) if err != nil { - log.Error().Err(err).Str("repository", repo).Msg("resolver: can't retrieve repoMeta for repo") + log.Error().Err(err).Str("repository", repo).Msg("resolver: can't retrieve repoMeta for repository") return &gql_generated.RepoInfo{}, err } diff --git a/pkg/extensions/search/search_test.go b/pkg/extensions/search/search_test.go index 9d3d92401c..16cab71ce0 100644 --- a/pkg/extensions/search/search_test.go +++ b/pkg/extensions/search/search_test.go @@ -703,11 +703,12 @@ func TestRepoListWithNewestImage(t *testing.T) { So(found, ShouldBeTrue) So(err, ShouldBeNil) - found, err = readFileAndSearchString(logPath, "updating the CVE database", 2*time.Minute) + found, err = readFileAndSearchString(logPath, "updating cve-db", 2*time.Minute) So(found, ShouldBeTrue) So(err, ShouldBeNil) - found, err = readFileAndSearchString(logPath, "DB update completed, next update scheduled", 4*time.Minute) + found, err = readFileAndSearchString(logPath, + "cve-db update completed, next update scheduled after interval", 4*time.Minute) So(found, ShouldBeTrue) So(err, ShouldBeNil) @@ -3395,11 +3396,12 @@ func TestGlobalSearch(t *testing.T) { So(found, ShouldBeTrue) So(err, ShouldBeNil) - found, err = readFileAndSearchString(logPath, "updating the CVE database", 2*time.Minute) + found, err = readFileAndSearchString(logPath, "updating cve-db", 2*time.Minute) So(found, ShouldBeTrue) So(err, ShouldBeNil) - found, err = readFileAndSearchString(logPath, "DB update completed, next update scheduled", 4*time.Minute) + found, err = readFileAndSearchString(logPath, "cve-db update completed, next update scheduled after interval", + 
4*time.Minute) So(found, ShouldBeTrue) So(err, ShouldBeNil) diff --git a/pkg/extensions/sync/content.go b/pkg/extensions/sync/content.go index 7ef2c9eaf2..bbe0120a7b 100644 --- a/pkg/extensions/sync/content.go +++ b/pkg/extensions/sync/content.go @@ -113,7 +113,7 @@ func (cm ContentManager) getContentByUpstreamRepo(repo string) *syncconf.Content if err != nil { cm.log.Error().Str("errorType", common.TypeOf(err)). Err(err).Str("pattern", - prefix).Msg("error while parsing glob pattern, skipping it...") + prefix).Msg("failed to parse glob pattern, skipping it") continue } diff --git a/pkg/extensions/sync/references/references.go b/pkg/extensions/sync/references/references.go index c1e5bcb0ad..5a2057bb45 100644 --- a/pkg/extensions/sync/references/references.go +++ b/pkg/extensions/sync/references/references.go @@ -80,7 +80,7 @@ func (refs References) syncAll(ctx context.Context, localRepo, upstreamRepo, for _, ref := range refs.referenceList { syncedRefsDigests, err = ref.SyncReferences(ctx, localRepo, upstreamRepo, subjectDigestStr) if err != nil { - refs.log.Error().Err(err). + refs.log.Warn().Err(err). Str("reference type", ref.Name()). Str("image", fmt.Sprintf("%s:%s", upstreamRepo, subjectDigestStr)). Msg("couldn't sync image referrer") @@ -109,7 +109,7 @@ func (refs References) SyncReference(ctx context.Context, localRepo, upstreamRep if ref.Name() == referenceType { syncedRefsDigests, err = ref.SyncReferences(ctx, localRepo, upstreamRepo, subjectDigestStr) if err != nil { - refs.log.Error().Err(err). + refs.log.Warn().Err(err). Str("reference type", ref.Name()). Str("image", fmt.Sprintf("%s:%s", upstreamRepo, subjectDigestStr)). 
Msg("couldn't sync image referrer") diff --git a/pkg/extensions/sync/service.go b/pkg/extensions/sync/service.go index a121c7cef1..f87cca84cb 100644 --- a/pkg/extensions/sync/service.go +++ b/pkg/extensions/sync/service.go @@ -125,7 +125,7 @@ func (service *BaseService) SetNextAvailableClient() error { } if err != nil { - service.log.Error().Err(err).Str("url", url).Msg("sync: failed to initialize http client") + service.log.Error().Err(err).Str("url", url).Msg("failed to initialize http client") continue } @@ -184,7 +184,7 @@ func (service *BaseService) GetNextRepo(lastRepo string) (string, error) { return err }, service.retryOptions); err != nil { service.log.Error().Str("errorType", common.TypeOf(err)).Str("remote registry", service.client.GetConfig().URL). - Err(err).Msg("error while getting repositories from remote registry") + Err(err).Msg("failed to get repository list from remote registry") return "", err } @@ -215,15 +215,15 @@ func (service *BaseService) SyncReference(ctx context.Context, repo string, if len(service.config.Content) > 0 { remoteRepo = service.contentManager.GetRepoSource(repo) if remoteRepo == "" { - service.log.Info().Str("remote", remoteURL).Str("repo", repo).Str("subject", subjectDigestStr). + service.log.Info().Str("remote", remoteURL).Str("repository", repo).Str("subject", subjectDigestStr). Str("reference type", referenceType).Msg("will not sync reference for image, filtered out by content") return zerr.ErrSyncImageFilteredOut } } - service.log.Info().Str("remote", remoteURL).Str("repo", repo).Str("subject", subjectDigestStr). - Str("reference type", referenceType).Msg("sync: syncing reference for image") + service.log.Info().Str("remote", remoteURL).Str("repository", repo).Str("subject", subjectDigestStr). 
+ Str("reference type", referenceType).Msg("syncing reference for image") return service.references.SyncReference(ctx, repo, remoteRepo, subjectDigestStr, referenceType) } @@ -237,15 +237,15 @@ func (service *BaseService) SyncImage(ctx context.Context, repo, reference strin if len(service.config.Content) > 0 { remoteRepo = service.contentManager.GetRepoSource(repo) if remoteRepo == "" { - service.log.Info().Str("remote", remoteURL).Str("repo", repo).Str("reference", reference). + service.log.Info().Str("remote", remoteURL).Str("repository", repo).Str("reference", reference). Msg("will not sync image, filtered out by content") return zerr.ErrSyncImageFilteredOut } } - service.log.Info().Str("remote", remoteURL).Str("repo", repo).Str("reference", reference). - Msg("sync: syncing image") + service.log.Info().Str("remote", remoteURL).Str("repository", repo).Str("reference", reference). + Msg("syncing image") manifestDigest, err := service.syncTag(ctx, repo, remoteRepo, reference) if err != nil { @@ -262,8 +262,8 @@ func (service *BaseService) SyncImage(ctx context.Context, repo, reference strin // sync repo periodically. func (service *BaseService) SyncRepo(ctx context.Context, repo string) error { - service.log.Info().Str("repo", repo).Str("registry", service.client.GetConfig().URL). - Msg("sync: syncing repo") + service.log.Info().Str("repository", repo).Str("registry", service.client.GetConfig().URL). + Msg("syncing repo") var err error @@ -274,8 +274,8 @@ func (service *BaseService) SyncRepo(ctx context.Context, repo string) error { return err }, service.retryOptions); err != nil { - service.log.Error().Str("errorType", common.TypeOf(err)).Str("repo", repo). - Err(err).Msg("error while getting tags for repo") + service.log.Error().Str("errorType", common.TypeOf(err)).Str("repository", repo). 
+ Err(err).Msg("failed to get tags for repository") return err } @@ -286,7 +286,7 @@ func (service *BaseService) SyncRepo(ctx context.Context, repo string) error { return err } - service.log.Info().Str("repo", repo).Msgf("sync: syncing tags %v", tags) + service.log.Info().Str("repository", repo).Msgf("syncing tags %v", tags) // apply content.destination rule localRepo := service.contentManager.GetRepoDestination(repo) @@ -314,8 +314,8 @@ func (service *BaseService) SyncRepo(ctx context.Context, repo string) error { continue } - service.log.Error().Str("errorType", common.TypeOf(err)).Str("repo", repo). - Err(err).Msg("error while syncing tags for repo") + service.log.Error().Str("errorType", common.TypeOf(err)).Str("repository", repo). + Err(err).Msg("failed to sync tags for repository") return err } @@ -329,15 +329,15 @@ func (service *BaseService) SyncRepo(ctx context.Context, repo string) error { return err }, service.retryOptions); err != nil { - service.log.Error().Str("errorType", common.TypeOf(err)).Str("repo", repo). - Err(err).Msg("error while syncing tags for repo") + service.log.Error().Str("errorType", common.TypeOf(err)).Str("repository", repo). + Err(err).Msg("failed to sync tags for repository") return err } } } - service.log.Info().Str("repo", repo).Msg("sync: finished syncing repo") + service.log.Info().Str("component", "sync").Str("repository", repo).Msg("finished syncing repository") return nil } @@ -357,14 +357,14 @@ func (service *BaseService) syncTag(ctx context.Context, localRepo, remoteRepo, remoteImageRef, err := service.remote.GetImageReference(remoteRepo, tag) if err != nil { service.log.Error().Err(err).Str("errortype", common.TypeOf(err)). 
- Str("repo", remoteRepo).Str("reference", tag).Msg("couldn't get a remote image reference") + Str("repository", remoteRepo).Str("reference", tag).Msg("couldn't get a remote image reference") return "", err } _, mediaType, manifestDigest, err := service.remote.GetManifestContent(remoteImageRef) if err != nil { - service.log.Error().Err(err).Str("repo", remoteRepo).Str("reference", tag). + service.log.Error().Err(err).Str("repository", remoteRepo).Str("reference", tag). Msg("couldn't get upstream image manifest details") return "", err @@ -389,7 +389,7 @@ func (service *BaseService) syncTag(ctx context.Context, localRepo, remoteRepo, skipImage, err := service.local.CanSkipImage(localRepo, tag, manifestDigest) if err != nil { service.log.Error().Err(err).Str("errortype", common.TypeOf(err)). - Str("repo", localRepo).Str("reference", tag). + Str("repository", localRepo).Str("reference", tag). Msg("couldn't check if the local image can be skipped") } @@ -397,7 +397,7 @@ func (service *BaseService) syncTag(ctx context.Context, localRepo, remoteRepo, localImageRef, err := service.local.GetImageReference(localRepo, tag) if err != nil { service.log.Error().Err(err).Str("errortype", common.TypeOf(err)). - Str("repo", localRepo).Str("reference", tag).Msg("couldn't get a local image reference") + Str("repository", localRepo).Str("reference", tag).Msg("couldn't get a local image reference") return "", err } @@ -417,7 +417,7 @@ func (service *BaseService) syncTag(ctx context.Context, localRepo, remoteRepo, err = service.local.CommitImage(localImageRef, localRepo, tag) if err != nil { service.log.Error().Err(err).Str("errortype", common.TypeOf(err)). 
- Str("repo", localRepo).Str("reference", tag).Msg("couldn't commit image to local image store") + Str("repository", localRepo).Str("reference", tag).Msg("couldn't commit image to local image store") return "", err } @@ -426,7 +426,8 @@ func (service *BaseService) syncTag(ctx context.Context, localRepo, remoteRepo, Msg("skipping image because it's already synced") } - service.log.Info().Str("image", remoteImageRef.DockerReference().String()).Msg("sync: finished syncing image") + service.log.Info().Str("component", "sync"). + Str("image", remoteImageRef.DockerReference().String()).Msg("finished syncing image") return manifestDigest, nil } diff --git a/pkg/log/guidelines.md b/pkg/log/guidelines.md index a01c660751..3d62ee3e97 100644 --- a/pkg/log/guidelines.md +++ b/pkg/log/guidelines.md @@ -61,3 +61,5 @@ For example, lookup a cache (fast path) and it throws a not-found error, and we expect to handle it and perform a slow path lookup. Instead of logging the lookup failure at ERROR level, it may be more appropriate to log at DEBUG level and then handle the error. + +Also, instead of `Msg("error at something")`, standardize on `Msg("failed at something")`. 
diff --git a/pkg/meta/boltdb/boltdb.go b/pkg/meta/boltdb/boltdb.go index a7a24298cf..eb7835b180 100644 --- a/pkg/meta/boltdb/boltdb.go +++ b/pkg/meta/boltdb/boltdb.go @@ -498,7 +498,7 @@ func (bdw *BoltDB) SearchTags(ctx context.Context, searchText string, protoImageMeta = imageIndexData default: - bdw.Log.Error().Str("mediaType", descriptor.MediaType).Msg("Unsupported media type") + bdw.Log.Error().Str("mediaType", descriptor.MediaType).Msg("unsupported media type") continue } @@ -602,7 +602,7 @@ func (bdw *BoltDB) FilterTags(ctx context.Context, filterRepoTag mTypes.FilterRe images = append(images, mConvert.GetFullImageMetaFromProto(tag, protoRepoMeta, imageIndexData)) } default: - bdw.Log.Error().Str("mediaType", descriptor.MediaType).Msg("Unsupported media type") + bdw.Log.Error().Str("mediaType", descriptor.MediaType).Msg("unsupported media type") continue } diff --git a/pkg/meta/dynamodb/dynamodb.go b/pkg/meta/dynamodb/dynamodb.go index cfb84e0ec7..2b8606d2e5 100644 --- a/pkg/meta/dynamodb/dynamodb.go +++ b/pkg/meta/dynamodb/dynamodb.go @@ -551,7 +551,7 @@ func (dwr *DynamoDB) SearchTags(ctx context.Context, searchText string) ([]mType protoImageMeta = imageIndexData default: - dwr.Log.Error().Str("mediaType", descriptor.MediaType).Msg("Unsupported media type") + dwr.Log.Error().Str("mediaType", descriptor.MediaType).Msg("unsupported media type") continue } @@ -655,7 +655,7 @@ func (dwr *DynamoDB) FilterTags(ctx context.Context, filterRepoTag mTypes.Filter images = append(images, mConvert.GetFullImageMetaFromProto(tag, protoRepoMeta, imageIndexData)) } default: - dwr.Log.Error().Str("mediaType", descriptor.MediaType).Msg("Unsupported media type") + dwr.Log.Error().Str("mediaType", descriptor.MediaType).Msg("unsupported media type") continue } diff --git a/pkg/meta/parse.go b/pkg/meta/parse.go index f6979fbdd2..186137c1dd 100644 --- a/pkg/meta/parse.go +++ b/pkg/meta/parse.go @@ -26,13 +26,13 @@ const ( // ParseStorage will sync all repos found in the 
rootdirectory of the oci layout that zot was deployed on with the // ParseStorage database. func ParseStorage(metaDB mTypes.MetaDB, storeController storage.StoreController, log log.Logger) error { - log.Info().Msg("Started parsing storage and updating MetaDB") + log.Info().Str("component", "metadb").Msg("parsing storage and initializing") allRepos, err := getAllRepos(storeController) if err != nil { rootDir := storeController.DefaultStore.RootDir() - log.Error().Err(err).Str("rootDir", rootDir). - Msg("load-local-layout: failed to get all repo names present under rootDir") + log.Error().Err(err).Str("component", "metadb").Str("rootDir", rootDir). + Msg("failed to get all repo names present under rootDir") return err } @@ -43,13 +43,13 @@ func ParseStorage(metaDB mTypes.MetaDB, storeController storage.StoreController, err := ParseRepo(repo, metaDB, storeController, log) if err != nil { - log.Error().Err(err).Str("repository", repo).Msg("load-local-layout: failed to sync repo") + log.Error().Err(err).Str("component", "metadb").Str("repository", repo).Msg("failed to parse repo") return err } } - log.Info().Msg("Done parsing storage and updating MetaDB") + log.Info().Str("component", "metadb").Msg("successfully initialized") return nil } @@ -65,7 +65,7 @@ func ParseRepo(repo string, metaDB mTypes.MetaDB, storeController storage.StoreC indexBlob, err := imageStore.GetIndexContent(repo) if err != nil { - log.Error().Err(err).Str("repository", repo).Msg("load-repo: failed to read index.json for repo") + log.Error().Err(err).Str("repository", repo).Msg("failed to read index.json for repo") return err } @@ -74,14 +74,14 @@ func ParseRepo(repo string, metaDB mTypes.MetaDB, storeController storage.StoreC err = json.Unmarshal(indexBlob, &indexContent) if err != nil { - log.Error().Err(err).Str("repository", repo).Msg("load-repo: failed to unmarshal index.json for repo") + log.Error().Err(err).Str("repository", repo).Msg("failed to unmarshal index.json for repo") return err } 
err = metaDB.ResetRepoReferences(repo) if err != nil && !errors.Is(err, zerr.ErrRepoMetaNotFound) { - log.Error().Err(err).Str("repository", repo).Msg("load-repo: failed to reset tag field in RepoMetadata for repo") + log.Error().Err(err).Str("repository", repo).Msg("failed to reset tag field in RepoMetadata for repo") return err } @@ -96,7 +96,7 @@ func ParseRepo(repo string, metaDB mTypes.MetaDB, storeController storage.StoreC manifestBlob, _, _, err := imageStore.GetImageManifest(repo, manifest.Digest.String()) if err != nil { log.Error().Err(err).Str("repository", repo).Str("digest", manifest.Digest.String()). - Msg("load-repo: failed to get blob for image") + Msg("failed to get blob for image") return err } @@ -111,7 +111,7 @@ func ParseRepo(repo string, metaDB mTypes.MetaDB, storeController storage.StoreC imageStore, metaDB, log) if err != nil { log.Error().Err(err).Str("repository", repo).Str("tag", tag). - Msg("load-repo: failed to set metadata for image") + Msg("failed to set metadata for image") return err } @@ -161,7 +161,7 @@ func getCosignSignatureLayersInfo( var manifestContent ispec.Manifest if err := json.Unmarshal(manifestBlob, &manifestContent); err != nil { log.Error().Err(err).Str("repository", repo).Str("reference", tag).Str("digest", manifestDigest).Msg( - "load-repo: unable to marshal blob index") + "failed to marshal blob index") return layers, err } @@ -175,7 +175,7 @@ func getCosignSignatureLayersInfo( layerContent, err := imageStore.GetBlobContent(repo, layer.Digest) if err != nil { log.Error().Err(err).Str("repository", repo).Str("reference", tag).Str("layerDigest", layer.Digest.String()).Msg( - "load-repo: unable to get cosign signature layer content") + "failed to get cosign signature layer content") return layers, err } @@ -183,7 +183,7 @@ func getCosignSignatureLayersInfo( layerSigKey, ok := layer.Annotations[zcommon.CosignSigKey] if !ok { log.Error().Err(err).Str("repository", repo).Str("reference", tag).Str("layerDigest", 
layer.Digest.String()).Msg( - "load-repo: unable to get specific annotation of cosign signature") + "failed to get specific annotation of cosign signature") } layers = append(layers, mTypes.LayerInfo{ @@ -204,7 +204,7 @@ func getNotationSignatureLayersInfo( var manifestContent ispec.Manifest if err := json.Unmarshal(manifestBlob, &manifestContent); err != nil { log.Error().Err(err).Str("repository", repo).Str("reference", manifestDigest).Msg( - "load-repo: unable to marshal blob index") + "failed to marshal blob index") return layers, err } @@ -216,7 +216,7 @@ func getNotationSignatureLayersInfo( if len(manifestContent.Layers) != 1 { log.Error().Err(zerr.ErrBadManifest).Str("repository", repo).Str("reference", manifestDigest). - Msg("load-repo: notation signature manifest requires exactly one layer but it does not") + Msg("notation signature manifest requires exactly one layer but it does not") return layers, zerr.ErrBadManifest } @@ -231,7 +231,7 @@ func getNotationSignatureLayersInfo( layerContent, err := imageStore.GetBlobContent(repo, layer) if err != nil { log.Error().Err(err).Str("repository", repo).Str("reference", manifestDigest).Str("layerDigest", layer.String()).Msg( - "load-repo: unable to get notation signature blob content") + "failed to get notation signature blob content") return layers, err } diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go index cc771d065c..812162c202 100644 --- a/pkg/scheduler/scheduler.go +++ b/pkg/scheduler/scheduler.go @@ -101,13 +101,13 @@ func (scheduler *Scheduler) poolWorker(ctx context.Context, numWorkers int, task for i := 0; i < numWorkers; i++ { go func(workerID int) { for task := range tasks { - scheduler.log.Debug().Int("worker", workerID).Msg("scheduler: starting task") + scheduler.log.Debug().Int("worker", workerID).Msg("starting task") if err := task.DoWork(ctx); err != nil { - scheduler.log.Error().Int("worker", workerID).Err(err).Msg("scheduler: error while executing task") + 
scheduler.log.Error().Int("worker", workerID).Err(err).Msg("failed to execute task") } - scheduler.log.Debug().Int("worker", workerID).Msg("scheduler: finished task") + scheduler.log.Debug().Int("worker", workerID).Msg("finished task") } }(i + 1) } @@ -129,7 +129,7 @@ func (scheduler *Scheduler) RunScheduler(ctx context.Context) { close(tasksWorker) close(scheduler.stopCh) - scheduler.log.Debug().Msg("scheduler: received stop signal, exiting...") + scheduler.log.Debug().Msg("received stop signal, exiting...") return default: @@ -138,7 +138,7 @@ func (scheduler *Scheduler) RunScheduler(ctx context.Context) { task := scheduler.getTask() if task != nil { // push tasks into worker pool - scheduler.log.Debug().Msg("scheduler: pushing task into worker pool") + scheduler.log.Debug().Msg("pushing task into worker pool") tasksWorker <- task } i++ @@ -162,7 +162,7 @@ func (scheduler *Scheduler) pushReadyGenerators() { scheduler.waitingGenerators = append(scheduler.waitingGenerators[:i], scheduler.waitingGenerators[i+1:]...) 
modified = true - scheduler.log.Debug().Msg("scheduler: waiting generator is ready, pushing to ready generators") + scheduler.log.Debug().Msg("waiting generator is ready, pushing to ready generators") break } @@ -261,7 +261,7 @@ func (scheduler *Scheduler) SubmitTask(task Task, priority Priority) { case <-scheduler.stopCh: return case tasksQ <- task: - scheduler.log.Info().Msg("scheduler: adding a new task") + scheduler.log.Info().Msg("adding a new task") } } @@ -308,7 +308,7 @@ func (gen *generator) generate(sch *Scheduler) { if gen.remainingTask == nil { nextTask, err := gen.taskGenerator.Next() if err != nil { - sch.log.Error().Err(err).Msg("scheduler: error while executing generator") + sch.log.Error().Err(err).Msg("failed to execute generator") return } diff --git a/pkg/scheduler/scheduler_test.go b/pkg/scheduler/scheduler_test.go index 8cb59b74ef..283ab96a8d 100644 --- a/pkg/scheduler/scheduler_test.go +++ b/pkg/scheduler/scheduler_test.go @@ -150,7 +150,7 @@ func TestScheduler(t *testing.T) { So(string(data), ShouldContainSubstring, "executing high priority task; index: 1") So(string(data), ShouldContainSubstring, "executing high priority task; index: 2") So(string(data), ShouldNotContainSubstring, "executing medium priority task; index: 1") - So(string(data), ShouldNotContainSubstring, "error while executing task") + So(string(data), ShouldNotContainSubstring, "failed to execute task") }) Convey("Test task returning an error", t, func() { @@ -173,8 +173,8 @@ func TestScheduler(t *testing.T) { data, err := os.ReadFile(logFile.Name()) So(err, ShouldBeNil) - So(string(data), ShouldContainSubstring, "scheduler: adding a new task") - So(string(data), ShouldContainSubstring, "error while executing task") + So(string(data), ShouldContainSubstring, "adding a new task") + So(string(data), ShouldContainSubstring, "failed to execute task") }) Convey("Test resubmit generator", t, func() { @@ -215,7 +215,7 @@ func TestScheduler(t *testing.T) { data, err := 
os.ReadFile(logFile.Name()) So(err, ShouldBeNil) - So(string(data), ShouldNotContainSubstring, "scheduler: adding a new task") + So(string(data), ShouldNotContainSubstring, "adding a new task") }) Convey("Test adding a new task when context is done", t, func() { @@ -238,7 +238,7 @@ func TestScheduler(t *testing.T) { data, err := os.ReadFile(logFile.Name()) So(err, ShouldBeNil) - So(string(data), ShouldNotContainSubstring, "scheduler: adding a new task") + So(string(data), ShouldNotContainSubstring, "adding a new task") }) } diff --git a/pkg/storage/common/common.go b/pkg/storage/common/common.go index b058e81f35..31bc12fb40 100644 --- a/pkg/storage/common/common.go +++ b/pkg/storage/common/common.go @@ -85,7 +85,7 @@ func ValidateManifest(imgStore storageTypes.ImageStore, repo, reference, mediaTy // validate manifest if err := ValidateManifestSchema(body); err != nil { - log.Error().Err(err).Msg("OCIv1 image manifest schema validation failed") + log.Error().Err(err).Msg("failed to validate OCIv1 image manifest schema") return "", zerr.NewError(zerr.ErrBadManifest).AddDetail("jsonSchemaValidation", err.Error()) } @@ -134,7 +134,7 @@ func ValidateManifest(imgStore storageTypes.ImageStore, repo, reference, mediaTy case ispec.MediaTypeImageIndex: // validate manifest if err := ValidateImageIndexSchema(body); err != nil { - log.Error().Err(err).Msg("OCIv1 image index manifest schema validation failed") + log.Error().Err(err).Msg("failed to validate OCIv1 image index manifest schema") return "", zerr.NewError(zerr.ErrBadManifest).AddDetail("jsonSchemaValidation", err.Error()) } @@ -1086,7 +1086,7 @@ func (gen *StorageMetricsInitGenerator) Next() (scheduler.Task, error) { return nil, err } - gen.Log.Debug().Str("repo", repo).Int("randomDelay", delay).Msg("StorageMetricsInitGenerator") + gen.Log.Debug().Str("repo", repo).Int("randomDelay", delay).Msg("storageMetricsInitGenerator") if repo == "" { gen.done = true diff --git a/pkg/storage/imagestore/imagestore.go 
b/pkg/storage/imagestore/imagestore.go index 57a845f73c..2ee1931de9 100644 --- a/pkg/storage/imagestore/imagestore.go +++ b/pkg/storage/imagestore/imagestore.go @@ -71,7 +71,7 @@ func NewImageStore(rootDir string, cacheDir string, dedupe, commit bool, log zlo metrics monitoring.MetricServer, linter common.Lint, storeDriver storageTypes.Driver, cacheDriver cache.Cache, ) storageTypes.ImageStore { if err := storeDriver.EnsureDir(rootDir); err != nil { - log.Error().Err(err).Str("rootDir", rootDir).Msg("unable to create root dir") + log.Error().Err(err).Str("rootDir", rootDir).Msg("failed to create root dir") return nil } @@ -143,14 +143,14 @@ func (is *ImageStore) initRepo(name string) error { // create "blobs" subdir err := is.storeDriver.EnsureDir(path.Join(repoDir, "blobs")) if err != nil { - is.log.Error().Err(err).Msg("error creating blobs subdir") + is.log.Error().Err(err).Str("repository", repoDir).Msg("failed to create blobs subdir") return err } // create BlobUploadDir subdir err = is.storeDriver.EnsureDir(path.Join(repoDir, storageConstants.BlobUploadDir)) if err != nil { - is.log.Error().Err(err).Msg("error creating blob upload subdir") + is.log.Error().Err(err).Msg("failed to create blob upload subdir") return err } @@ -162,13 +162,13 @@ func (is *ImageStore) initRepo(name string) error { buf, err := json.Marshal(il) if err != nil { - is.log.Error().Err(err).Msg("unable to marshal JSON") + is.log.Error().Err(err).Msg("failed to marshal JSON") return err } if _, err := is.storeDriver.WriteFile(ilPath, buf); err != nil { - is.log.Error().Err(err).Str("file", ilPath).Msg("unable to write file") + is.log.Error().Err(err).Str("file", ilPath).Msg("failed to write file") return err } @@ -182,13 +182,13 @@ func (is *ImageStore) initRepo(name string) error { buf, err := json.Marshal(index) if err != nil { - is.log.Error().Err(err).Msg("unable to marshal JSON") + is.log.Error().Err(err).Msg("failed to marshal JSON") return err } if _, err := 
is.storeDriver.WriteFile(indexPath, buf); err != nil { - is.log.Error().Err(err).Str("file", ilPath).Msg("unable to write file") + is.log.Error().Err(err).Str("file", ilPath).Msg("failed to write file") return err } @@ -224,7 +224,7 @@ func (is *ImageStore) ValidateRepo(name string) (bool, error) { files, err := is.storeDriver.List(dir) if err != nil { - is.log.Error().Err(err).Str("dir", dir).Msg("unable to read directory") + is.log.Error().Err(err).Str("dir", dir).Msg("failed to read directory") return false, zerr.ErrRepoNotFound } @@ -580,7 +580,7 @@ func (is *ImageStore) PutImageManifest(repo, reference, mediaType string, //noli manifestPath := path.Join(dir, mDigest.Encoded()) if _, err = is.storeDriver.WriteFile(manifestPath, body); err != nil { - is.log.Error().Err(err).Str("file", manifestPath).Msg("unable to write") + is.log.Error().Err(err).Str("file", manifestPath).Msg("failed to write") return "", "", err } @@ -677,7 +677,7 @@ func (is *ImageStore) deleteImageManifest(repo, reference string, detectCollisio } if _, err := is.storeDriver.WriteFile(file, buf); err != nil { - is.log.Debug().Str("reference", reference).Str("repo", repo).Msg("error while updating index.json") + is.log.Debug().Str("reference", reference).Str("repository", repo).Msg("failed to update index.json") return err } @@ -717,7 +717,7 @@ func (is *ImageStore) BlobUploadPath(repo, uuid string) string { // NewBlobUpload returns the unique ID for an upload in progress. 
func (is *ImageStore) NewBlobUpload(repo string) (string, error) { if err := is.InitRepo(repo); err != nil { - is.log.Error().Err(err).Msg("error initializing repo") + is.log.Error().Err(err).Msg("failed to initialize repo") return "", err } @@ -899,7 +899,7 @@ func (is *ImageStore) FinishBlobUpload(repo, uuid string, body io.Reader, dstDig err = is.storeDriver.EnsureDir(dir) if err != nil { - is.log.Error().Err(err).Msg("error creating blobs/sha256 dir") + is.log.Error().Err(err).Str("dir", dir).Msg("failed to create dir") return err } @@ -915,14 +915,14 @@ func (is *ImageStore) FinishBlobUpload(repo, uuid string, body io.Reader, dstDig err = is.DedupeBlob(src, dstDigest, repo, dst) if err := inject.Error(err); err != nil { is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()). - Str("dst", dst).Msg("unable to dedupe blob") + Str("dst", dst).Msg("failed to dedupe blob") return err } } else { if err := is.storeDriver.Move(src, dst); err != nil { is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()). - Str("dst", dst).Msg("unable to finish blob") + Str("dst", dst).Msg("failed to finish blob") return err } @@ -993,14 +993,14 @@ func (is *ImageStore) FullBlobUpload(repo string, body io.Reader, dstDigest godi if is.dedupe && fmt.Sprintf("%v", is.cache) != fmt.Sprintf("%v", nil) { if err := is.DedupeBlob(src, dstDigest, repo, dst); err != nil { is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()). - Str("dst", dst).Msg("unable to dedupe blob") + Str("dst", dst).Msg("failed to dedupe blob") return "", -1, err } } else { if err := is.storeDriver.Move(src, dst); err != nil { is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()). 
- Str("dst", dst).Msg("unable to finish blob") + Str("dst", dst).Msg("failed to finish blob") return "", -1, err } @@ -1015,7 +1015,7 @@ retry: dstRecord, err := is.cache.GetBlob(dstDigest) if err := inject.Error(err); err != nil && !errors.Is(err, zerr.ErrCacheMiss) { - is.log.Error().Err(err).Str("blobPath", dst).Msg("dedupe: unable to lookup blob record") + is.log.Error().Err(err).Str("blobPath", dst).Msg("dedupe: failed to lookup blob record") return err } @@ -1023,14 +1023,14 @@ retry: if dstRecord == "" { // cache record doesn't exist, so first disk and cache entry for this digest if err := is.cache.PutBlob(dstDigest, dst); err != nil { - is.log.Error().Err(err).Str("blobPath", dst).Msg("dedupe: unable to insert blob record") + is.log.Error().Err(err).Str("blobPath", dst).Msg("dedupe: failed to insert blob record") return err } // move the blob from uploads to final dest if err := is.storeDriver.Move(src, dst); err != nil { - is.log.Error().Err(err).Str("src", src).Str("dst", dst).Msg("dedupe: unable to rename blob") + is.log.Error().Err(err).Str("src", src).Str("dst", dst).Msg("dedupe: failed to rename blob") return err } @@ -1045,12 +1045,12 @@ retry: blobInfo, err := is.storeDriver.Stat(dstRecord) if err != nil { - is.log.Error().Err(err).Str("blobPath", dstRecord).Msg("dedupe: unable to stat") + is.log.Error().Err(err).Str("blobPath", dstRecord).Msg("dedupe: failed to stat") // the actual blob on disk may have been removed by GC, so sync the cache err := is.cache.DeleteBlob(dstDigest, dstRecord) if err = inject.Error(err); err != nil { //nolint:lll - is.log.Error().Err(err).Str("dstDigest", dstDigest.String()).Str("dst", dst).Msg("dedupe: unable to delete blob record") + is.log.Error().Err(err).Str("dstDigest", dstDigest.String()).Str("dst", dst).Msg("dedupe: failed to delete blob record") return err } @@ -1061,13 +1061,13 @@ retry: // prevent overwrite original blob if !is.storeDriver.SameFile(dst, dstRecord) { if err := is.storeDriver.Link(dstRecord, 
dst); err != nil { - is.log.Error().Err(err).Str("blobPath", dstRecord).Msg("dedupe: unable to link blobs") + is.log.Error().Err(err).Str("blobPath", dstRecord).Msg("dedupe: failed to link blobs") return err } if err := is.cache.PutBlob(dstDigest, dst); err != nil { - is.log.Error().Err(err).Str("blobPath", dst).Msg("dedupe: unable to insert blob record") + is.log.Error().Err(err).Str("blobPath", dst).Msg("dedupe: failed to insert blob record") return err } @@ -1091,7 +1091,7 @@ retry: // remove temp blobupload if err := is.storeDriver.Delete(src); err != nil { - is.log.Error().Err(err).Str("src", src).Msg("dedupe: unable to remove blob") + is.log.Error().Err(err).Str("src", src).Msg("dedupe: failed to remove blob") return err } @@ -1118,7 +1118,7 @@ func (is *ImageStore) DeleteBlobUpload(repo, uuid string) error { defer writer.Close() if err := writer.Cancel(); err != nil { - is.log.Error().Err(err).Str("blobUploadPath", blobUploadPath).Msg("error deleting blob upload") + is.log.Error().Err(err).Str("blobUploadPath", blobUploadPath).Msg("failed to delete blob upload") return err } @@ -1175,7 +1175,7 @@ func (is *ImageStore) CheckBlob(repo string, digest godigest.Digest) (bool, int6 // Check blobs in cache dstRecord, err := is.checkCacheBlob(digest) if err != nil { - is.log.Debug().Err(err).Str("digest", digest.String()).Msg("cache: not found") + is.log.Warn().Err(err).Str("digest", digest.String()).Msg("not found in cache") return false, -1, zerr.ErrBlobNotFound } @@ -1187,7 +1187,7 @@ func (is *ImageStore) CheckBlob(repo string, digest godigest.Digest) (bool, int6 // put deduped blob in cache if err := is.cache.PutBlob(digest, blobPath); err != nil { - is.log.Error().Err(err).Str("blobPath", blobPath).Msg("dedupe: unable to insert blob record") + is.log.Error().Err(err).Str("blobPath", blobPath).Msg("dedupe: failed to insert blob record") return false, -1, err } @@ -1221,7 +1221,7 @@ func (is *ImageStore) StatBlob(repo string, digest godigest.Digest) (bool, int64
// Check blobs in cache dstRecord, err := is.checkCacheBlob(digest) if err != nil { - is.log.Debug().Err(err).Str("digest", digest.String()).Msg("cache: not found") + is.log.Warn().Err(err).Str("digest", digest.String()).Msg("not found in cache") return false, -1, time.Time{}, zerr.ErrBlobNotFound } @@ -1260,7 +1260,7 @@ func (is *ImageStore) checkCacheBlob(digest godigest.Digest) (string, error) { // the actual blob on disk may have been removed by GC, so sync the cache if err := is.cache.DeleteBlob(digest, dstRecord); err != nil { is.log.Error().Err(err).Str("digest", digest.String()).Str("blobPath", dstRecord). - Msg("unable to remove blob path from cache") + Msg("failed to remove blob path from cache") return "", err } @@ -1275,7 +1275,7 @@ func (is *ImageStore) checkCacheBlob(digest godigest.Digest) (string, error) { func (is *ImageStore) copyBlob(repo string, blobPath, dstRecord string) (int64, error) { if err := is.initRepo(repo); err != nil { - is.log.Error().Err(err).Str("repository", repo).Msg("unable to initialize an empty repo") + is.log.Error().Err(err).Str("repository", repo).Msg("failed to initialize an empty repo") return -1, err } @@ -1283,7 +1283,7 @@ func (is *ImageStore) copyBlob(repo string, blobPath, dstRecord string) (int64, _ = is.storeDriver.EnsureDir(filepath.Dir(blobPath)) if err := is.storeDriver.Link(dstRecord, blobPath); err != nil { - is.log.Error().Err(err).Str("blobPath", blobPath).Str("link", dstRecord).Msg("dedupe: unable to hard link") + is.log.Error().Err(err).Str("blobPath", blobPath).Str("link", dstRecord).Msg("dedupe: failed to hard link") return -1, zerr.ErrBlobNotFound } @@ -1324,7 +1324,7 @@ func (is *ImageStore) GetBlobPartial(repo string, digest godigest.Digest, mediaT // Check blobs in cache blobPath, err = is.checkCacheBlob(digest) if err != nil { - is.log.Error().Err(err).Str("digest", digest.String()).Msg("cache: not found") + is.log.Warn().Err(err).Str("digest", digest.String()).Msg("not found in cache") return nil, 
-1, -1, zerr.ErrBlobNotFound } @@ -1394,7 +1394,7 @@ func (is *ImageStore) GetBlob(repo string, digest godigest.Digest, mediaType str // Check blobs in cache dstRecord, err := is.checkCacheBlob(digest) if err != nil { - is.log.Error().Err(err).Str("digest", digest.String()).Msg("cache: not found") + is.log.Warn().Err(err).Str("digest", digest.String()).Msg("not found in cache") return nil, -1, zerr.ErrBlobNotFound } @@ -1447,7 +1447,7 @@ func (is *ImageStore) GetBlobContent(repo string, digest godigest.Digest) ([]byt // Check blobs in cache dstRecord, err := is.checkCacheBlob(digest) if err != nil { - is.log.Error().Err(err).Str("digest", digest.String()).Msg("cache: not found") + is.log.Warn().Err(err).Str("digest", digest.String()).Msg("not found in cache") return nil, zerr.ErrBlobNotFound } @@ -1512,13 +1512,13 @@ func (is *ImageStore) PutIndexContent(repo string, index ispec.Index) error { buf, err := json.Marshal(index) if err != nil { - is.log.Error().Err(err).Str("file", indexPath).Msg("unable to marshal JSON") + is.log.Error().Err(err).Str("file", indexPath).Msg("failed to marshal JSON") return err } if _, err = is.storeDriver.WriteFile(indexPath, buf); err != nil { - is.log.Error().Err(err).Str("file", indexPath).Msg("unable to write") + is.log.Error().Err(err).Str("file", indexPath).Msg("failed to write") return err } @@ -1558,14 +1558,14 @@ func (is *ImageStore) CleanupRepo(repo string, blobs []godigest.Digest, removeRe continue } - is.log.Error().Err(err).Str("repository", repo).Str("digest", digest.String()).Msg("unable to delete manifest") + is.log.Error().Err(err).Str("repository", repo).Str("digest", digest.String()).Msg("failed to delete manifest") return count, err } count++ } else { - is.log.Error().Err(err).Str("repository", repo).Str("digest", digest.String()).Msg("unable to delete blob") + is.log.Error().Err(err).Str("repository", repo).Str("digest", digest.String()).Msg("failed to delete blob") return count, err } @@ -1576,7 +1576,7 @@ func 
(is *ImageStore) CleanupRepo(repo string, blobs []godigest.Digest, removeRe blobUploads, err := is.storeDriver.List(path.Join(is.RootDir(), repo, storageConstants.BlobUploadDir)) if err != nil { - is.log.Debug().Str("repository", repo).Msg("unable to list .uploads/ dir") + is.log.Debug().Str("repository", repo).Msg("failed to list .uploads/ dir") } // if removeRepo flag is true and we cleanup all blobs and there are no blobs currently being uploaded. @@ -1584,7 +1584,7 @@ func (is *ImageStore) CleanupRepo(repo string, blobs []godigest.Digest, removeRe is.log.Info().Str("repository", repo).Msg("removed all blobs, removing repo") if err := is.storeDriver.Delete(path.Join(is.rootDir, repo)); err != nil { - is.log.Error().Err(err).Str("repository", repo).Msg("unable to remove repo") + is.log.Error().Err(err).Str("repository", repo).Msg("failed to remove repo") return count, err } @@ -1611,7 +1611,7 @@ func (is *ImageStore) deleteBlob(repo string, digest godigest.Digest) error { if fmt.Sprintf("%v", is.cache) != fmt.Sprintf("%v", nil) { dstRecord, err := is.cache.GetBlob(digest) if err != nil && !errors.Is(err, zerr.ErrCacheMiss) { - is.log.Error().Err(err).Str("blobPath", dstRecord).Msg("dedupe: unable to lookup blob record") + is.log.Error().Err(err).Str("blobPath", dstRecord).Msg("dedupe: failed to lookup blob record") return err } @@ -1620,7 +1620,7 @@ func (is *ImageStore) deleteBlob(repo string, digest godigest.Digest) error { if ok := is.cache.HasBlob(digest, blobPath); ok { if err := is.cache.DeleteBlob(digest, blobPath); err != nil { is.log.Error().Err(err).Str("digest", digest.String()).Str("blobPath", blobPath). 
- Msg("unable to remove blob path from cache") + Msg("failed to remove blob path from cache") return err } @@ -1631,7 +1631,7 @@ func (is *ImageStore) deleteBlob(repo string, digest godigest.Digest) error { // get next candidate dstRecord, err := is.cache.GetBlob(digest) if err != nil && !errors.Is(err, zerr.ErrCacheMiss) { - is.log.Error().Err(err).Str("blobPath", dstRecord).Msg("dedupe: unable to lookup blob record") + is.log.Error().Err(err).Str("blobPath", dstRecord).Msg("dedupe: failed to lookup blob record") return err } @@ -1649,7 +1649,7 @@ func (is *ImageStore) deleteBlob(repo string, digest godigest.Digest) error { if binfo.Size() == 0 { if err := is.storeDriver.Move(blobPath, dstRecord); err != nil { - is.log.Error().Err(err).Str("blobPath", blobPath).Msg("unable to remove blob path") + is.log.Error().Err(err).Str("blobPath", blobPath).Msg("failed to remove blob path") return err } @@ -1661,7 +1661,7 @@ func (is *ImageStore) deleteBlob(repo string, digest godigest.Digest) error { } if err := is.storeDriver.Delete(blobPath); err != nil { - is.log.Error().Err(err).Str("blobPath", blobPath).Msg("unable to remove blob path") + is.log.Error().Err(err).Str("blobPath", blobPath).Msg("failed to remove blob path") return err } @@ -1787,7 +1787,7 @@ func (is *ImageStore) getOriginalBlob(digest godigest.Digest, duplicateBlobs []s originalBlob, err = is.checkCacheBlob(digest) if err != nil && !errors.Is(err, zerr.ErrBlobNotFound) && !errors.Is(err, zerr.ErrCacheMiss) { - is.log.Error().Err(err).Msg("rebuild dedupe: unable to find blob in cache") + is.log.Error().Err(err).Msg("rebuild dedupe: failed to find blob in cache") return originalBlob, err } @@ -1836,7 +1836,7 @@ func (is *ImageStore) dedupeBlobs(digest godigest.Digest, duplicateBlobs []strin if originalBlob == "" { originalBlob, err = is.getOriginalBlob(digest, duplicateBlobs) if err != nil { - is.log.Error().Err(err).Msg("rebuild dedupe: unable to find original blob") + is.log.Error().Err(err).Msg("rebuild 
dedupe: failed to find original blob") return zerr.ErrDedupeRebuild } @@ -1859,7 +1859,7 @@ func (is *ImageStore) dedupeBlobs(digest godigest.Digest, duplicateBlobs []strin // if we have an original blob cached then we can safely dedupe the rest of them if originalBlob != "" { if err := is.storeDriver.Link(originalBlob, blobPath); err != nil { - is.log.Error().Err(err).Str("path", blobPath).Msg("rebuild dedupe: unable to dedupe blob") + is.log.Error().Err(err).Str("path", blobPath).Msg("rebuild dedupe: failed to dedupe blob") return err } @@ -1888,7 +1888,7 @@ func (is *ImageStore) restoreDedupedBlobs(digest godigest.Digest, duplicateBlobs // first we need to find the original blob, either in cache or by checking each blob size originalBlob, err := is.getOriginalBlob(digest, duplicateBlobs) if err != nil { - is.log.Error().Err(err).Msg("rebuild dedupe: unable to find original blob") + is.log.Error().Err(err).Msg("rebuild dedupe: failed to find original blob") return zerr.ErrDedupeRebuild } diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go index daaf7ed9ac..677e81676b 100644 --- a/pkg/storage/storage.go +++ b/pkg/storage/storage.go @@ -69,7 +69,7 @@ func New(config *config.Config, linter common.Lint, metrics monitoring.MetricSer // Init a Storager from connection string. store, err := factory.Create(storeName, config.Storage.StorageDriver) if err != nil { - log.Error().Err(err).Str("rootDir", config.Storage.RootDirectory).Msg("unable to create s3 service") + log.Error().Err(err).Str("rootDir", config.Storage.RootDirectory).Msg("failed to create s3 service") return storeController, err } @@ -183,7 +183,7 @@ func getSubStore(cfg *config.Config, subPaths map[string]config.StorageConfig, // Init a Storager from connection string. 
store, err := factory.Create(storeName, storageConfig.StorageDriver) if err != nil { - log.Error().Err(err).Str("rootDir", storageConfig.RootDirectory).Msg("Unable to create s3 service") + log.Error().Err(err).Str("rootDir", storageConfig.RootDirectory).Msg("failed to create s3 service") return nil, err } diff --git a/pkg/test/common/fs_test.go b/pkg/test/common/fs_test.go index 774bcbbd58..590dd1becb 100644 --- a/pkg/test/common/fs_test.go +++ b/pkg/test/common/fs_test.go @@ -132,7 +132,7 @@ func TestReadLogFileAndSearchString(t *testing.T) { Convey("Invalid path", t, func() { _, err = tcommon.ReadLogFileAndSearchString("invalidPath", - "DB update completed, next update scheduled", 1*time.Second) + "cve-db update completed, next update scheduled after interval", 1*time.Second) So(err, ShouldNotBeNil) }) @@ -159,7 +159,7 @@ func TestReadLogFileAndCountStringOccurence(t *testing.T) { Convey("Invalid path", t, func() { _, err = tcommon.ReadLogFileAndCountStringOccurence("invalidPath", - "DB update completed, next update scheduled", 1*time.Second, 1) + "cve-db update completed, next update scheduled after interval", 1*time.Second, 1) So(err, ShouldNotBeNil) }) diff --git a/pkg/test/oci-utils/oci_layout.go b/pkg/test/oci-utils/oci_layout.go index a8c5d54c0a..1f9fe91ded 100644 --- a/pkg/test/oci-utils/oci_layout.go +++ b/pkg/test/oci-utils/oci_layout.go @@ -580,7 +580,7 @@ func (olu BaseOciLayoutUtils) ExtractImageDetails( ) { manifest, dig, err := olu.GetImageManifest(repo, tag) if err != nil { - log.Error().Err(err).Msg("Could not retrieve image ispec manifest") + log.Error().Err(err).Msg("failed to retrieve image manifest") return "", nil, nil, err } @@ -589,7 +589,7 @@ func (olu BaseOciLayoutUtils) ExtractImageDetails( imageConfig, err := olu.GetImageConfigInfo(repo, digest) if err != nil { - log.Error().Err(err).Msg("Could not retrieve image config") + log.Error().Err(err).Msg("failed to retrieve image config") return "", nil, nil, err }