diff --git a/.github/workflows/golangci-lint.yaml b/.github/workflows/golangci-lint.yaml index 631bcd7d6..191bdece8 100644 --- a/.github/workflows/golangci-lint.yaml +++ b/.github/workflows/golangci-lint.yaml @@ -44,3 +44,13 @@ jobs: - name: Run linter from make target run: | make check + - name: Run log linter + run: | + set +e + + # log messages should never start upper-cased + find . -name '*.go' | grep -v '_test.go' | xargs grep -n "Msg(\"[A-Z]" | grep -v -E "Msg\(\"(HTTP|OpenID|OAuth|TLS)" + if [ $? -eq 0 ]; then + exit 1 + fi + exit 0 diff --git a/errors/errors.go b/errors/errors.go index ed2dcf42c..0b90c585a 100644 --- a/errors/errors.go +++ b/errors/errors.go @@ -89,8 +89,8 @@ var ( ErrImgStoreNotFound = errors.New("routes: image store not found corresponding to given route") ErrEmptyValue = errors.New("cache: empty value") ErrEmptyRepoList = errors.New("search: no repository found") - ErrCVESearchDisabled = errors.New("search: CVE search is disabled") - ErrCVEDBNotFound = errors.New("cve: CVE DB is not present") + ErrCVESearchDisabled = errors.New("search: cve search is disabled") + ErrCVEDBNotFound = errors.New("cve: cve-db is not present") ErrInvalidRepositoryName = errors.New("repository: not a valid repository name") ErrSyncMissingCatalog = errors.New("sync: couldn't fetch upstream registry's catalog") ErrMethodNotSupported = errors.New("storage: method not supported") diff --git a/pkg/api/authn.go b/pkg/api/authn.go index 8a8b42849..6a3dce818 100644 --- a/pkg/api/authn.go +++ b/pkg/api/authn.go @@ -449,7 +449,7 @@ func bearerAuthHandler(ctlr *Controller) mux.MiddlewareFunc { EmptyDefaultNamespace: true, }) if err != nil { - ctlr.Log.Panic().Err(err).Msg("error creating bearer authorizer") + ctlr.Log.Panic().Err(err).Msg("failed to create bearer authorizer") } return func(next http.Handler) http.Handler { diff --git a/pkg/api/routes.go b/pkg/api/routes.go index 1db9a8044..0f0ec482b 100644 --- a/pkg/api/routes.go +++ b/pkg/api/routes.go @@ -547,7 +547,7 
@@ func getReferrers(ctx context.Context, routeHandler *RouteHandler, if errSync := routeHandler.c.SyncOnDemand.SyncReference(ctx, name, digest.String(), syncConstants.OCI); errSync != nil { routeHandler.c.Log.Err(errSync).Str("repository", name).Str("reference", digest.String()). - Msg("error encounter while syncing OCI reference for image") + Msg("failed to sync OCI reference for image") } refs, err = imgStore.GetReferrers(name, digest, artifactTypes) @@ -605,7 +605,7 @@ func (rh *RouteHandler) GetReferrers(response http.ResponseWriter, request *http rh.c.Log.Error().Err(err).Str("name", name).Str("digest", digest.String()).Msg("manifest not found") response.WriteHeader(http.StatusNotFound) } else { - rh.c.Log.Error().Err(err).Str("name", name).Str("digest", digest.String()).Msg("unable to get references") + rh.c.Log.Error().Err(err).Str("name", name).Str("digest", digest.String()).Msg("failed to get references") response.WriteHeader(http.StatusInternalServerError) } @@ -614,7 +614,7 @@ func (rh *RouteHandler) GetReferrers(response http.ResponseWriter, request *http out, err := json.Marshal(referrers) if err != nil { - rh.c.Log.Error().Err(err).Str("name", name).Str("digest", digest.String()).Msg("unable to marshal json") + rh.c.Log.Error().Err(err).Str("name", name).Str("digest", digest.String()).Msg("failed to marshal json") response.WriteHeader(http.StatusInternalServerError) return @@ -1221,8 +1221,6 @@ func (rh *RouteHandler) CreateBlobUpload(response http.ResponseWriter, request * return } - rh.c.Log.Info().Int64("r.ContentLength", request.ContentLength).Msg("DEBUG") - digestStr := digests[0] digest := godigest.Digest(digestStr) @@ -1494,8 +1492,6 @@ func (rh *RouteHandler) UpdateBlobUpload(response http.ResponseWriter, request * return } - rh.c.Log.Info().Int64("r.ContentLength", request.ContentLength).Msg("DEBUG") - contentPresent := true contentLen, err := strconv.ParseInt(request.Header.Get("Content-Length"), 10, 64) @@ -1918,7 +1914,7 @@ func 
getImageManifest(ctx context.Context, routeHandler *RouteHandler, imgStore if errSync := routeHandler.c.SyncOnDemand.SyncImage(ctx, name, reference); errSync != nil { routeHandler.c.Log.Err(errSync).Str("repository", name).Str("reference", reference). - Msg("error encounter while syncing image") + Msg("failed to sync image") } } @@ -1939,7 +1935,7 @@ func getOrasReferrers(ctx context.Context, routeHandler *RouteHandler, if errSync := routeHandler.c.SyncOnDemand.SyncReference(ctx, name, digest.String(), syncConstants.Oras); errSync != nil { routeHandler.c.Log.Error().Err(err).Str("name", name).Str("digest", digest.String()). - Msg("unable to get references") + Msg("failed to get references") } refs, err = imgStore.GetOrasReferrers(name, digest, artifactType) @@ -2009,7 +2005,7 @@ func (rh *RouteHandler) GetOrasReferrers(response http.ResponseWriter, request * rh.c.Log.Error().Err(err).Str("name", name).Str("digest", digest.String()).Msg("manifest not found") response.WriteHeader(http.StatusNotFound) } else { - rh.c.Log.Error().Err(err).Str("name", name).Str("digest", digest.String()).Msg("unable to get references") + rh.c.Log.Error().Err(err).Str("name", name).Str("digest", digest.String()).Msg("failed to get references") response.WriteHeader(http.StatusInternalServerError) } @@ -2039,7 +2035,7 @@ type APIKeyPayload struct { //nolint:revive func (rh *RouteHandler) GetAPIKeys(resp http.ResponseWriter, req *http.Request) { apiKeys, err := rh.c.MetaDB.GetUserAPIKeys(req.Context()) if err != nil { - rh.c.Log.Error().Err(err).Msg("error getting list of API keys for user") + rh.c.Log.Error().Err(err).Msg("failed to get list of api keys for user") resp.WriteHeader(http.StatusInternalServerError) return @@ -2055,7 +2051,7 @@ func (rh *RouteHandler) GetAPIKeys(resp http.ResponseWriter, req *http.Request) data, err := json.Marshal(apiKeyResponse) if err != nil { - rh.c.Log.Error().Err(err).Msg("unable to marshal api key response") + rh.c.Log.Error().Err(err).Msg("failed to 
marshal api key response") resp.WriteHeader(http.StatusInternalServerError) @@ -2083,7 +2079,7 @@ func (rh *RouteHandler) CreateAPIKey(resp http.ResponseWriter, req *http.Request body, err := io.ReadAll(req.Body) if err != nil { - rh.c.Log.Error().Msg("unable to read request body") + rh.c.Log.Error().Msg("failed to read request body") resp.WriteHeader(http.StatusInternalServerError) return @@ -2138,7 +2134,7 @@ func (rh *RouteHandler) CreateAPIKey(resp http.ResponseWriter, req *http.Request err = rh.c.MetaDB.AddUserAPIKey(req.Context(), hashedAPIKey, apiKeyDetails) if err != nil { - rh.c.Log.Error().Err(err).Msg("error storing API key") + rh.c.Log.Error().Err(err).Msg("failed to store api key") resp.WriteHeader(http.StatusInternalServerError) return @@ -2156,7 +2152,7 @@ func (rh *RouteHandler) CreateAPIKey(resp http.ResponseWriter, req *http.Request data, err := json.Marshal(apiKeyResponse) if err != nil { - rh.c.Log.Error().Err(err).Msg("unable to marshal api key response") + rh.c.Log.Error().Err(err).Msg("failed to marshal api key response") resp.WriteHeader(http.StatusInternalServerError) @@ -2191,7 +2187,7 @@ func (rh *RouteHandler) RevokeAPIKey(resp http.ResponseWriter, req *http.Request err := rh.c.MetaDB.DeleteUserAPIKey(req.Context(), keyID) if err != nil { - rh.c.Log.Error().Err(err).Str("keyID", keyID).Msg("error deleting API key") + rh.c.Log.Error().Err(err).Str("keyID", keyID).Msg("failed to delete api key") resp.WriteHeader(http.StatusInternalServerError) return diff --git a/pkg/api/session.go b/pkg/api/session.go index c5f46f226..35b32682d 100644 --- a/pkg/api/session.go +++ b/pkg/api/session.go @@ -121,7 +121,8 @@ func SessionLogger(ctlr *Controller) mux.MiddlewareFunc { monitoring.ObserveHTTPMethodLatency(ctlr.Metrics, method, latency) // histogram } - log.Str("clientIP", clientIP). + log.Str("component", "session"). + Str("clientIP", clientIP). Str("method", method). Str("path", path). Int("statusCode", statusCode). 
@@ -172,6 +173,7 @@ func SessionAuditLogger(audit *log.Logger) mux.MiddlewareFunc { method == http.MethodPatch || method == http.MethodDelete) && (statusCode == http.StatusOK || statusCode == http.StatusCreated || statusCode == http.StatusAccepted) { audit.Info(). + Str("component", "session"). Str("clientIP", clientIP). Str("subject", username). Str("action", method). diff --git a/pkg/cli/server/config_reloader.go b/pkg/cli/server/config_reloader.go index 91e22105d..9d3a4ea92 100644 --- a/pkg/cli/server/config_reloader.go +++ b/pkg/cli/server/config_reloader.go @@ -112,7 +112,7 @@ func (hr *HotReloader) Start() context.Context { }() if err := hr.watcher.Add(hr.filePath); err != nil { - log.Error().Err(err).Str("config", hr.filePath).Msg("error adding config file to FsNotify watcher") + log.Error().Err(err).Str("config", hr.filePath).Msg("failed to add config file to fsnotify watcher") panic(err) } diff --git a/pkg/cli/server/config_reloader_test.go b/pkg/cli/server/config_reloader_test.go index f22781f16..2cb9714af 100644 --- a/pkg/cli/server/config_reloader_test.go +++ b/pkg/cli/server/config_reloader_test.go @@ -393,7 +393,7 @@ func TestConfigReloader(t *testing.T) { time.Sleep(5 * time.Second) found, err := test.ReadLogFileAndSearchString(logFile.Name(), - "Error downloading Trivy DB to destination dir", 30*time.Second) + "failed to download trivy-db to destination dir", 30*time.Second) So(err, ShouldBeNil) So(found, ShouldBeTrue) diff --git a/pkg/cli/server/extensions_test.go b/pkg/cli/server/extensions_test.go index bf2c6a61d..62d80f110 100644 --- a/pkg/cli/server/extensions_test.go +++ b/pkg/cli/server/extensions_test.go @@ -785,7 +785,7 @@ func TestServeScrubExtension(t *testing.T) { So(dataStr, ShouldContainSubstring, "\"Extensions\":{\"Search\":null,\"Sync\":null,\"Metrics\":null,\"Scrub\":{\"Enable\":true,\"Interval\":86400000000000},\"Lint\":null") //nolint:lll // gofumpt conflicts with lll So(dataStr, ShouldNotContainSubstring, - "Scrub interval set to 
too-short interval < 2h, changing scrub duration to 2 hours and continuing.") + "scrub interval set to too-short interval < 2h, changing scrub duration to 2 hours and continuing.") }) Convey("scrub implicitly enabled, but with scrub interval param set", t, func(c C) { @@ -817,7 +817,7 @@ func TestServeScrubExtension(t *testing.T) { dataStr := string(data) So(dataStr, ShouldContainSubstring, "\"Scrub\":{\"Enable\":true,\"Interval\":3600000000000}") So(dataStr, ShouldContainSubstring, - "Scrub interval set to too-short interval < 2h, changing scrub duration to 2 hours and continuing.") + "scrub interval set to too-short interval < 2h, changing scrub duration to 2 hours and continuing.") }) Convey("scrub explicitly enabled, but without scrub interval param set", t, func(c C) { @@ -849,7 +849,7 @@ func TestServeScrubExtension(t *testing.T) { So(dataStr, ShouldContainSubstring, "\"Extensions\":{\"Search\":null,\"Sync\":null,\"Metrics\":null,\"Scrub\":{\"Enable\":true,\"Interval\":86400000000000},\"Lint\":null") //nolint:lll // gofumpt conflicts with lll So(dataStr, ShouldNotContainSubstring, - "Scrub interval set to too-short interval < 2h, changing scrub duration to 2 hours and continuing.") + "scrub interval set to too-short interval < 2h, changing scrub duration to 2 hours and continuing.") }) Convey("scrub explicitly disabled", t, func(c C) { @@ -879,9 +879,9 @@ func TestServeScrubExtension(t *testing.T) { defer os.Remove(logPath) // clean up dataStr := string(data) So(dataStr, ShouldContainSubstring, "\"Scrub\":{\"Enable\":false,\"Interval\":86400000000000}") - So(dataStr, ShouldContainSubstring, "Scrub config not provided, skipping scrub") + So(dataStr, ShouldContainSubstring, "scrub config not provided, skipping scrub") So(dataStr, ShouldNotContainSubstring, - "Scrub interval set to too-short interval < 2h, changing scrub duration to 2 hours and continuing.") + "scrub interval set to too-short interval < 2h, changing scrub duration to 2 hours and continuing.") }) 
} @@ -1046,7 +1046,7 @@ func TestServeSearchEnabledCVE(t *testing.T) { So(found, ShouldBeTrue) So(err, ShouldBeNil) - found, err = ReadLogFileAndSearchString(logPath, "updating the CVE database", readLogFileTimeout) + found, err = ReadLogFileAndSearchString(logPath, "updating cve-db", readLogFileTimeout) So(found, ShouldBeTrue) So(err, ShouldBeNil) }) diff --git a/pkg/cli/server/root.go b/pkg/cli/server/root.go index c4088fa80..eb84c6492 100644 --- a/pkg/cli/server/root.go +++ b/pkg/cli/server/root.go @@ -71,7 +71,7 @@ func newServeCmd(conf *config.Config) *cobra.Command { } if err := ctlr.Run(reloaderCtx); err != nil { - ctlr.Log.Fatal().Err(err).Msg("unable to start controller, exiting") + ctlr.Log.Fatal().Err(err).Msg("failed to start controller, exiting") } }, } @@ -105,15 +105,15 @@ func newScrubCmd(conf *config.Config) *cobra.Command { fmt.Sprintf("http://%s/v2", net.JoinHostPort(conf.HTTP.Address, conf.HTTP.Port)), nil) if err != nil { - log.Error().Err(err).Msg("unable to create a new http request") + log.Error().Err(err).Msg("failed to create a new http request") panic(err) } response, err := http.DefaultClient.Do(req) if err == nil { response.Body.Close() - log.Warn().Msg("The server is running, in order to perform the scrub command the server should be shut down") - panic("Error: server is running") + log.Warn().Msg("server is running, in order to perform the scrub command the server should be shut down") + panic("server is running") } else { // server is down ctlr := api.NewController(conf) @@ -146,11 +146,11 @@ func newVerifyCmd(conf *config.Config) *cobra.Command { Run: func(cmd *cobra.Command, args []string) { if len(args) > 0 { if err := LoadConfiguration(conf, args[0]); err != nil { - log.Error().Str("config", args[0]).Msg("Config file is invalid") + log.Error().Str("config", args[0]).Msg("config file is invalid") panic(err) } - log.Info().Str("config", args[0]).Msg("Config file is valid") + log.Info().Str("config", args[0]).Msg("config file is 
valid") } }, } @@ -291,11 +291,11 @@ func validateCacheConfig(cfg *config.Config, log zlog.Logger) error { func validateExtensionsConfig(cfg *config.Config, log zlog.Logger) error { if cfg.Extensions != nil && cfg.Extensions.Mgmt != nil { - log.Warn().Msg("The mgmt extensions configuration option has been made redundant and will be ignored.") + log.Warn().Msg("mgmt extensions configuration option has been made redundant and will be ignored.") } if cfg.Extensions != nil && cfg.Extensions.APIKey != nil { - log.Warn().Msg("The apikey extension configuration will be ignored as API keys " + + log.Warn().Msg("apikey extension configuration will be ignored as API keys " + "are now configurable in the HTTP settings.") } @@ -303,7 +303,7 @@ func validateExtensionsConfig(cfg *config.Config, log zlog.Logger) error { // it would make sense to also check for mgmt and user prefs to be enabled, // but those are both enabled by having the search and ui extensions enabled if cfg.Extensions.Search == nil || !*cfg.Extensions.Search.Enable { - log.Error().Err(zerr.ErrBadConfig).Msg("UI functionality can't be used without search extension.") + log.Error().Err(zerr.ErrBadConfig).Msg("ui functionality can't be used without search extension.") return zerr.ErrBadConfig } @@ -312,7 +312,7 @@ func validateExtensionsConfig(cfg *config.Config, log zlog.Logger) error { //nolint:lll if cfg.Storage.StorageDriver != nil && cfg.Extensions != nil && cfg.Extensions.Search != nil && cfg.Extensions.Search.Enable != nil && *cfg.Extensions.Search.Enable && cfg.Extensions.Search.CVE != nil { - log.Error().Err(zerr.ErrBadConfig).Msg("CVE functionality can't be used with remote storage. 
Please disable CVE") + log.Error().Err(zerr.ErrBadConfig).Msg("cve functionality can't be used with remote storage - please disable cve") return zerr.ErrBadConfig } @@ -321,7 +321,7 @@ func validateExtensionsConfig(cfg *config.Config, log zlog.Logger) error { //nolint:lll if subPath.StorageDriver != nil && cfg.Extensions != nil && cfg.Extensions.Search != nil && cfg.Extensions.Search.Enable != nil && *cfg.Extensions.Search.Enable && cfg.Extensions.Search.CVE != nil { - log.Error().Err(zerr.ErrBadConfig).Msg("CVE functionality can't be used with remote storage. Please disable CVE") + log.Error().Err(zerr.ErrBadConfig).Msg("cve functionality can't be used with remote storage - please disable cve") return zerr.ErrBadConfig } @@ -534,7 +534,7 @@ func applyDefaultValues(config *config.Config, viperInstance *viper.Viper, log z if config.Extensions.Search.CVE.UpdateInterval < defaultUpdateInterval { config.Extensions.Search.CVE.UpdateInterval = defaultUpdateInterval - log.Warn().Msg("CVE update interval set to too-short interval < 2h, " + + log.Warn().Msg("cve update interval set to too-short interval < 2h, " + "changing update duration to 2 hours and continuing.") } @@ -544,15 +544,15 @@ func applyDefaultValues(config *config.Config, viperInstance *viper.Viper, log z if config.Extensions.Search.CVE.Trivy.DBRepository == "" { defaultDBDownloadURL := "ghcr.io/aquasecurity/trivy-db" - log.Info().Str("trivyDownloadURL", defaultDBDownloadURL). - Msg("Config: using default Trivy DB download URL.") + log.Info().Str("url", defaultDBDownloadURL). + Msg("config: using default trivy-db download URL.") config.Extensions.Search.CVE.Trivy.DBRepository = defaultDBDownloadURL } if config.Extensions.Search.CVE.Trivy.JavaDBRepository == "" { defaultJavaDBDownloadURL := "ghcr.io/aquasecurity/trivy-java-db" - log.Info().Str("trivyJavaDownloadURL", defaultJavaDBDownloadURL). - Msg("Config: using default Trivy Java DB download URL.") + log.Info().Str("url", defaultJavaDBDownloadURL). 
+ Msg("config: using default trivy-java-db download URL.") config.Extensions.Search.CVE.Trivy.JavaDBRepository = defaultJavaDBDownloadURL } } @@ -621,7 +621,7 @@ func applyDefaultValues(config *config.Config, viperInstance *viper.Viper, log z cachePath := path.Join(cacheDir, storageConstants.BoltdbName+storageConstants.DBExtensionName) if _, err := os.Stat(cachePath); err == nil { - log.Info().Msg("Config: dedupe set to false for s3 driver but used to be true.") + log.Info().Msg("config: dedupe set to false for s3 driver but used to be true.") log.Info().Str("cache path", cachePath).Msg("found cache database") config.Storage.RemoteCache = false @@ -642,7 +642,7 @@ func applyDefaultValues(config *config.Config, viperInstance *viper.Viper, log z subpathCachePath := path.Join(subpathCacheDir, storageConstants.BoltdbName+storageConstants.DBExtensionName) if _, err := os.Stat(subpathCachePath); err == nil { - log.Info().Msg("Config: dedupe set to false for s3 driver but used to be true. ") + log.Info().Msg("config: dedupe set to false for s3 driver but used to be true. 
") log.Info().Str("cache path", subpathCachePath).Msg("found cache database") storageConfig.RemoteCache = false @@ -701,14 +701,14 @@ func LoadConfiguration(config *config.Config, configPath string) error { viperInstance.SetConfigFile(configPath) if err := viperInstance.ReadInConfig(); err != nil { - log.Error().Err(err).Msg("error while reading configuration") + log.Error().Err(err).Msg("failed to read configuration") return err } metaData := &mapstructure.Metadata{} if err := viperInstance.Unmarshal(&config, metadataConfig(metaData)); err != nil { - log.Error().Err(err).Msg("error while unmarshaling new config") + log.Error().Err(err).Msg("failed to unmarshal new config") return err } diff --git a/pkg/exporter/api/exporter.go b/pkg/exporter/api/exporter.go index 59a8991e4..16dae68c6 100644 --- a/pkg/exporter/api/exporter.go +++ b/pkg/exporter/api/exporter.go @@ -101,7 +101,7 @@ func (zc Collector) Collect(ch chan<- prometheus.Metric) { func panicOnDuplicateMetricName(m map[string]*prometheus.Desc, name string, log log.Logger) { if _, present := m[name]; present { - log.Fatal().Msg("Duplicate keys: metric " + name + " already present") + log.Fatal().Str("metric", name).Msg("duplicate key found") } } @@ -180,16 +180,16 @@ func runExporter(c *Controller) { err := prometheus.Register(GetCollector(c)) if err != nil { - c.Log.Error().Err(err).Msg("Expected error in testing") + c.Log.Debug().Err(err).Msg("ignoring error") } http.Handle(c.Config.Exporter.Metrics.Path, promhttp.Handler()) - c.Log.Info().Str("exporter addr", exporterAddr). - Str("exporter metrics path", c.Config.Exporter.Metrics.Path). - Msg("Exporter is listening on exporter addr & exposes metrics on exporter metrics path") + c.Log.Info().Str("addr", exporterAddr). + Str("path", c.Config.Exporter.Metrics.Path). 
+ Msg("exporter listening") serverAddr := fmt.Sprintf("%s://%s:%s", c.Config.Server.Protocol, c.Config.Server.Host, c.Config.Server.Port) - c.Log.Info().Str("serverAddr", serverAddr).Msg("Scraping metrics") - c.Log.Fatal().Err(server.ListenAndServe()).Msg("Exporter stopped") + c.Log.Info().Str("serverAddr", serverAddr).Msg("scraping metrics") + c.Log.Fatal().Err(server.ListenAndServe()).Msg("exporter stopped") } diff --git a/pkg/exporter/cli/cli.go b/pkg/exporter/cli/cli.go index 27a547197..e944d39d1 100644 --- a/pkg/exporter/cli/cli.go +++ b/pkg/exporter/cli/cli.go @@ -60,18 +60,24 @@ func loadConfiguration(config *api.Config, configPath string) { viper.SetConfigFile(configPath) if err := viper.ReadInConfig(); err != nil { - log.Error().Err(err).Msg("Error while reading configuration") + log.Error().Err(err).Str("config", configPath).Msg("failed to read configuration") panic(err) } metaData := &mapstructure.Metadata{} if err := viper.Unmarshal(&config, metadataConfig(metaData)); err != nil { - log.Error().Err(err).Msg("Error while unmarshaling new config") + log.Error().Err(err).Str("config", configPath).Msg("failed to unmarshal config") panic(err) } - if len(metaData.Keys) == 0 || len(metaData.Unused) > 0 { - log.Error().Err(zerr.ErrBadConfig).Msg("Bad configuration, retry writing it") + if len(metaData.Keys) == 0 { + log.Error().Err(zerr.ErrBadConfig).Str("config", configPath).Msg("bad configuration") + panic(zerr.ErrBadConfig) + } + + if len(metaData.Unused) > 0 { + log.Error().Err(zerr.ErrBadConfig).Interface("unknown fields", metaData.Unused). 
+ Str("config", configPath).Msg("bad configuration") panic(zerr.ErrBadConfig) } } diff --git a/pkg/extensions/extension_metrics.go b/pkg/extensions/extension_metrics.go index 54e71982f..4978437d0 100644 --- a/pkg/extensions/extension_metrics.go +++ b/pkg/extensions/extension_metrics.go @@ -18,10 +18,10 @@ func EnableMetricsExtension(config *config.Config, log log.Logger, rootDir strin if config.Extensions.Metrics.Prometheus.Path == "" { config.Extensions.Metrics.Prometheus.Path = "/metrics" - log.Warn().Msg("Prometheus instrumentation Path not set, changing to '/metrics'.") + log.Warn().Msg("prometheus instrumentation path not set, changing to '/metrics'.") } } else { - log.Info().Msg("Metrics config not provided, skipping Metrics config update") + log.Info().Msg("metrics config not provided, skipping metrics config update") } } diff --git a/pkg/extensions/extension_scrub.go b/pkg/extensions/extension_scrub.go index bc61ad1fa..045d725db 100644 --- a/pkg/extensions/extension_scrub.go +++ b/pkg/extensions/extension_scrub.go @@ -25,7 +25,7 @@ func EnableScrubExtension(config *config.Config, log log.Logger, storeController if config.Extensions.Scrub.Interval < minScrubInterval { config.Extensions.Scrub.Interval = minScrubInterval - log.Warn().Msg("Scrub interval set to too-short interval < 2h, changing scrub duration to 2 hours and continuing.") //nolint:lll // gofumpt conflicts with lll + log.Warn().Msg("scrub interval set to too-short interval < 2h, changing scrub duration to 2 hours and continuing.") //nolint:lll // gofumpt conflicts with lll } generator := &taskGenerator{ @@ -44,7 +44,7 @@ func EnableScrubExtension(config *config.Config, log log.Logger, storeController } } } else { - log.Info().Msg("Scrub config not provided, skipping scrub") + log.Info().Msg("scrub config not provided, skipping scrub") } } diff --git a/pkg/extensions/extension_search.go b/pkg/extensions/extension_search.go index ecfa04612..a1a04a13d 100644 --- a/pkg/extensions/extension_search.go 
+++ b/pkg/extensions/extension_search.go @@ -52,14 +52,14 @@ func EnableSearchExtension(conf *config.Config, storeController storage.StoreCon downloadTrivyDB(updateInterval, taskScheduler, cveScanner, log) startScanner(scanInterval, metaDB, taskScheduler, cveScanner, log) } else { - log.Info().Msg("CVE config not provided, skipping CVE update") + log.Info().Msg("cve config not provided, skipping cve-db update") } } func downloadTrivyDB(interval time.Duration, sch *scheduler.Scheduler, cveScanner CveScanner, log log.Logger) { generator := cveinfo.NewDBUpdateTaskGenerator(interval, cveScanner, log) - log.Info().Msg("Submitting CVE DB update scheduler") + log.Info().Msg("submitting cve-db update generator to scheduler") sch.SubmitGenerator(generator, interval, scheduler.HighPriority) } @@ -68,7 +68,7 @@ func startScanner(interval time.Duration, metaDB mTypes.MetaDB, sch *scheduler.S ) { generator := cveinfo.NewScanTaskGenerator(metaDB, cveScanner, log) - log.Info().Msg("Submitting CVE scan scheduler") + log.Info().Msg("submitting cve-scan generator to scheduler") sch.SubmitGenerator(generator, interval, scheduler.MediumPriority) } diff --git a/pkg/extensions/extension_sync.go b/pkg/extensions/extension_sync.go index 852112dc0..a7f141e4a 100644 --- a/pkg/extensions/extension_sync.go +++ b/pkg/extensions/extension_sync.go @@ -64,7 +64,7 @@ func EnableSyncExtension(config *config.Config, metaDB mTypes.MetaDB, return onDemand, nil } - log.Info().Msg("Sync registries config not provided or disabled, skipping sync") + log.Info().Msg("sync config not provided or disabled, so not enabling sync") return nil, nil //nolint: nilnil } diff --git a/pkg/extensions/extensions_test.go b/pkg/extensions/extensions_test.go index 9f1063675..2f6fb9519 100644 --- a/pkg/extensions/extensions_test.go +++ b/pkg/extensions/extensions_test.go @@ -65,7 +65,7 @@ func TestEnableExtension(t *testing.T) { data, err := os.ReadFile(logFile.Name()) So(err, ShouldBeNil) So(string(data), 
ShouldContainSubstring, - "Sync registries config not provided or disabled, skipping sync") + "sync config not provided or disabled, so not enabling sync") }) } @@ -106,7 +106,7 @@ func TestMetricsExtension(t *testing.T) { data, _ := os.ReadFile(logFile.Name()) So(string(data), ShouldContainSubstring, - "Prometheus instrumentation Path not set, changing to '/metrics'.") + "prometheus instrumentation path not set, changing to '/metrics'.") }) } diff --git a/pkg/extensions/imagetrust/image_trust.go b/pkg/extensions/imagetrust/image_trust.go index 974418a2d..e46ea3b10 100644 --- a/pkg/extensions/imagetrust/image_trust.go +++ b/pkg/extensions/imagetrust/image_trust.go @@ -248,7 +248,7 @@ func (validityT *validityTask) DoWork(ctx context.Context) error { if len(sigs[zcommon.CosignSignature]) != 0 || len(sigs[zcommon.NotationSignature]) != 0 { err := validityT.metaDB.UpdateSignaturesValidity(validityT.repo.Name, godigest.Digest(signedManifest)) if err != nil { - validityT.log.Info().Msg("error while verifying signatures") + validityT.log.Info().Msg("failed to verify signatures") return err } diff --git a/pkg/extensions/monitoring/minimal.go b/pkg/extensions/monitoring/minimal.go index 54efb5d67..c54f69536 100644 --- a/pkg/extensions/monitoring/minimal.go +++ b/pkg/extensions/monitoring/minimal.go @@ -314,7 +314,7 @@ func (ms *metricServer) CounterInc(cv *CounterValue) { if err != nil { // The last thing we want is to panic/stop the server due to instrumentation // thus log a message (should be detected during development of new metrics) - ms.log.Error().Err(err).Msg("Instrumentation error") + ms.log.Error().Err(err).Msg("failed due to instrumentation error") return } @@ -334,7 +334,7 @@ func (ms *metricServer) GaugeSet(gv *GaugeValue) { err := sanityChecks(gv.Name, labels, ok, gv.LabelNames, gv.LabelValues) if err != nil { - ms.log.Error().Err(err).Msg("Instrumentation error") + ms.log.Error().Err(err).Msg("failed due to instrumentation error") return } @@ -353,7 +353,7 
@@ func (ms *metricServer) SummaryObserve(sv *SummaryValue) { err := sanityChecks(sv.Name, labels, ok, sv.LabelNames, sv.LabelValues) if err != nil { - ms.log.Error().Err(err).Msg("Instrumentation error") + ms.log.Error().Err(err).Msg("failed due to instrumentation error") return } @@ -374,7 +374,7 @@ func (ms *metricServer) HistogramObserve(hv *HistogramValue) { err := sanityChecks(hv.Name, labels, ok, hv.LabelNames, hv.LabelValues) if err != nil { - ms.log.Error().Err(err).Msg("Instrumentation error") + ms.log.Error().Err(err).Msg("failed due to instrumentation error") return } diff --git a/pkg/extensions/scrub/scrub_test.go b/pkg/extensions/scrub/scrub_test.go index 34db265ee..892652e52 100644 --- a/pkg/extensions/scrub/scrub_test.go +++ b/pkg/extensions/scrub/scrub_test.go @@ -176,7 +176,7 @@ func TestScrubExtension(t *testing.T) { data, err := os.ReadFile(logFile.Name()) So(err, ShouldBeNil) - So(string(data), ShouldContainSubstring, "error while executing generator") + So(string(data), ShouldContainSubstring, "failed to execute generator") So(os.Chmod(path.Join(dir, repoName), 0o755), ShouldBeNil) }) diff --git a/pkg/extensions/search/cve/cve.go b/pkg/extensions/search/cve/cve.go index f4747cff7..f14d31078 100644 --- a/pkg/extensions/search/cve/cve.go +++ b/pkg/extensions/search/cve/cve.go @@ -199,10 +199,10 @@ func (cveinfo BaseCveInfo) GetImageListWithCVEFixed(repo, cveID string) ([]cvemo if len(vulnerableTags) != 0 { cveinfo.Log.Info().Str("repository", repo).Str("cve-id", cveID). - Interface("vulnerableTags", vulnerableTags).Msg("Vulnerable tags") + Interface("tags", vulnerableTags).Msg("vulnerable tags") fixedTags = GetFixedTags(allTags, vulnerableTags) cveinfo.Log.Info().Str("repository", repo).Str("cve-id", cveID). - Interface("fixedTags", fixedTags).Msg("Fixed tags") + Interface("tags", fixedTags).Msg("fixed tags") } else { cveinfo.Log.Info().Str("repository", repo).Str("cve-id", cveID). 
Msg("image does not contain any tag that have given cve") diff --git a/pkg/extensions/search/cve/cve_test.go b/pkg/extensions/search/cve/cve_test.go index 32d905915..95f75d091 100644 --- a/pkg/extensions/search/cve/cve_test.go +++ b/pkg/extensions/search/cve/cve_test.go @@ -459,23 +459,23 @@ func TestCVESearchDisabled(t *testing.T) { ctrlManager.StartAndWait(port) // Wait for trivy db to download - found, err := ReadLogFileAndSearchString(logPath, "CVE config not provided, skipping CVE update", 90*time.Second) + found, err := ReadLogFileAndSearchString(logPath, "cve config not provided, skipping cve-db update", 90*time.Second) So(err, ShouldBeNil) So(found, ShouldBeTrue) defer ctrlManager.StopServer() resp, _ := resty.R().SetBasicAuth(username, passphrase).Get(baseURL + constants.FullSearchPrefix + "?query={CVEListForImage(image:\"zot-test\"){Tag%20CVEList{Id%20Description%20Severity%20PackageList{Name%20InstalledVersion%20FixedVersion}}}}") - So(string(resp.Body()), ShouldContainSubstring, "search: CVE search is disabled") + So(string(resp.Body()), ShouldContainSubstring, "search: cve search is disabled") So(resp.StatusCode(), ShouldEqual, 200) resp, _ = resty.R().SetBasicAuth(username, passphrase).Get(baseURL + constants.FullSearchPrefix + "?query={ImageListForCVE(id:\"CVE-201-20482\"){Results{RepoName%20Tag}}}") - So(string(resp.Body()), ShouldContainSubstring, "search: CVE search is disabled") + So(string(resp.Body()), ShouldContainSubstring, "search: cve search is disabled") So(resp.StatusCode(), ShouldEqual, 200) resp, _ = resty.R().SetBasicAuth(username, passphrase).Get(baseURL + constants.FullSearchPrefix + "?query={ImageListWithCVEFixed(id:\"" + "randomId" + "\",image:\"zot-test\"){Results{RepoName%20LastUpdated}}}") So(resp, ShouldNotBeNil) - So(string(resp.Body()), ShouldContainSubstring, "search: CVE search is disabled") + So(string(resp.Body()), ShouldContainSubstring, "search: cve search is disabled") So(resp.StatusCode(), ShouldEqual, 200) }) } @@ 
-536,7 +536,7 @@ func TestCVESearch(t *testing.T) { // trivy db download fail err = os.Mkdir(dbDir+"/_trivy", 0o000) So(err, ShouldBeNil) - found, err := ReadLogFileAndSearchString(logPath, "Error downloading Trivy DB to destination dir", 180*time.Second) + found, err := ReadLogFileAndSearchString(logPath, "failed to download trivy-db to destination dir", 180*time.Second) So(err, ShouldBeNil) So(found, ShouldBeTrue) @@ -544,7 +544,7 @@ func TestCVESearch(t *testing.T) { So(err, ShouldBeNil) // Wait for trivy db to download - found, err = ReadLogFileAndSearchString(logPath, "DB update completed, next update scheduled", 180*time.Second) + found, err = ReadLogFileAndSearchString(logPath, "cve-db update completed, next update scheduled after interval", 180*time.Second) So(err, ShouldBeNil) So(found, ShouldBeTrue) diff --git a/pkg/extensions/search/cve/scan.go b/pkg/extensions/search/cve/scan.go index cacf084ad..bbb13d7ab 100644 --- a/pkg/extensions/search/cve/scan.go +++ b/pkg/extensions/search/cve/scan.go @@ -15,10 +15,12 @@ import ( func NewScanTaskGenerator( metaDB mTypes.MetaDB, scanner Scanner, - log log.Logger, + logC log.Logger, ) scheduler.TaskGenerator { + sublogger := logC.With().Str("component", "cve").Logger() + return &scanTaskGenerator{ - log: log, + log: log.Logger{Logger: sublogger}, metaDB: metaDB, scanner: scanner, lock: &sync.Mutex{}, @@ -127,13 +129,13 @@ func (gen *scanTaskGenerator) Next() (scheduler.Task, error) { if err != nil { // Do not crash the generator for potential repodb inconistencies // as there may be scannable images not yet scanned - gen.log.Warn().Err(err).Msg("Scheduled CVE scan: error while obtaining repo metadata") + gen.log.Warn().Err(err).Msg("failed to obtain repo metadata during scheduled cve scan") } // no reposMeta are returned, all results are in already in cache // or manifests cannot be scanned if len(reposMeta) == 0 { - gen.log.Info().Msg("Scheduled CVE scan: finished for available images") + 
gen.log.Info().Msg("finished scanning available images during scheduled cve scan") gen.done = true @@ -195,13 +197,13 @@ func (st *scanTask) DoWork(ctx context.Context) error { // We cache the results internally in the scanner // so we can discard the actual results for now if _, err := st.generator.scanner.ScanImage(image); err != nil { - st.generator.log.Error().Err(err).Str("image", image).Msg("Scheduled CVE scan errored for image") + st.generator.log.Error().Err(err).Str("image", image).Msg("scheduled cve scan failed for image") st.generator.addError(st.digest, err) return err } - st.generator.log.Debug().Str("image", image).Msg("Scheduled CVE scan completed successfully for image") + st.generator.log.Debug().Str("image", image).Msg("scheduled cve scan completed successfully for image") return nil } diff --git a/pkg/extensions/search/cve/trivy/scanner.go b/pkg/extensions/search/cve/trivy/scanner.go index ca70c7423..a409cab73 100644 --- a/pkg/extensions/search/cve/trivy/scanner.go +++ b/pkg/extensions/search/cve/trivy/scanner.go @@ -249,7 +249,7 @@ func (scanner Scanner) isManifestScanable(digestStr string) (bool, error) { err = json.Unmarshal(manifestData.ManifestBlob, &manifestContent) if err != nil { - scanner.log.Error().Err(err).Msg("unable to unmashal manifest blob") + scanner.log.Error().Err(err).Msg("failed to unmarshal manifest blob") return false, zerr.ErrScanNotSupported } @@ -352,7 +352,7 @@ func (scanner Scanner) ScanImage(image string) (map[string]cvemodel.CVE, error) } if err != nil { - scanner.log.Error().Err(err).Str("image", originalImageInput).Msg("unable to scan image") + scanner.log.Error().Err(err).Str("image", originalImageInput).Msg("failed to scan image") return map[string]cvemodel.CVE{}, err } @@ -507,18 +507,16 @@ func (scanner Scanner) UpdateDB() error { } func (scanner Scanner) updateDB(dbDir string) error { - scanner.log.Debug().Str("dbDir", dbDir).Msg("Download Trivy DB to destination dir") - ctx := context.Background() registryOpts
:= fanalTypes.RegistryOptions{Insecure: false} - scanner.log.Debug().Str("dbDir", dbDir).Msg("Started downloading Trivy DB to destination dir") + scanner.log.Debug().Str("dbDir", dbDir).Msg("started downloading trivy-db to destination dir") err := operation.DownloadDB(ctx, "dev", dbDir, scanner.dbRepository, false, false, registryOpts) if err != nil { scanner.log.Error().Err(err).Str("dbDir", dbDir). - Str("dbRepository", scanner.dbRepository).Msg("Error downloading Trivy DB to destination dir") + Str("dbRepository", scanner.dbRepository).Msg("failed to download trivy-db to destination dir") return err } @@ -528,13 +526,13 @@ func (scanner Scanner) updateDB(dbDir string) error { if err := javadb.Update(); err != nil { scanner.log.Error().Err(err).Str("dbDir", dbDir). - Str("javaDBRepository", scanner.javaDBRepository).Msg("Error downloading Trivy Java DB to destination dir") + Str("javaDBRepository", scanner.javaDBRepository).Msg("failed to download trivy-java-db to destination dir") return err } } - scanner.log.Debug().Str("dbDir", dbDir).Msg("Finished downloading Trivy DB to destination dir") + scanner.log.Debug().Str("dbDir", dbDir).Msg("finished downloading trivy-db to destination dir") return nil } diff --git a/pkg/extensions/search/cve/update.go b/pkg/extensions/search/cve/update.go index 871bb9743..1c095fab2 100644 --- a/pkg/extensions/search/cve/update.go +++ b/pkg/extensions/search/cve/update.go @@ -92,7 +92,7 @@ func newDBUpdadeTask(interval time.Duration, scanner Scanner, } func (dbt *dbUpdateTask) DoWork(ctx context.Context) error { - dbt.log.Info().Msg("updating the CVE database") + dbt.log.Info().Msg("updating cve-db") err := dbt.scanner.UpdateDB() if err != nil { @@ -114,7 +114,8 @@ func (dbt *dbUpdateTask) DoWork(ctx context.Context) error { dbt.generator.lastTaskTime = time.Now() dbt.generator.status = done dbt.generator.lock.Unlock() - dbt.log.Info().Str("DB update completed, next update scheduled after", dbt.interval.String()).Msg("") + + 
dbt.log.Info().Interface("interval", dbt.interval).Msg("cve-db update completed, next update scheduled after interval") return nil } diff --git a/pkg/extensions/search/resolver.go b/pkg/extensions/search/resolver.go index 5bbd0daab..90cfce768 100644 --- a/pkg/extensions/search/resolver.go +++ b/pkg/extensions/search/resolver.go @@ -424,12 +424,12 @@ func getImageListForCVE( for _, repoMeta := range reposMeta { repo := repoMeta.Name - log.Info().Str("repository", repo).Str("CVE", cveID).Msg("extracting list of tags affected by CVE") + log.Info().Str("repository", repo).Str("CVE", cveID).Msg("extracting list of tags affected by this cve") tagsInfo, err := cveInfo.GetImageListForCVE(repo, cveID) if err != nil { log.Error().Str("repository", repo).Str("CVE", cveID).Err(err). - Msg("error getting image list for CVE from repo") + Msg("failed to get image list for this cve from repository") return &gql_generated.PaginatedImagesResult{}, err } @@ -497,12 +497,12 @@ func getImageListWithCVEFixed( ) (*gql_generated.PaginatedImagesResult, error) { imageList := make([]*gql_generated.ImageSummary, 0) - log.Info().Str("repository", repo).Str("CVE", cveID).Msg("extracting list of tags where CVE is fixed") + log.Info().Str("repository", repo).Str("CVE", cveID).Msg("extracting list of tags where this cve is fixed") tagsInfo, err := cveInfo.GetImageListWithCVEFixed(repo, cveID) if err != nil { log.Error().Str("repository", repo).Str("CVE", cveID).Err(err). 
- Msg("error getting image list with CVE fixed from repo") + Msg("failed to get image list with this cve fixed from repository") return &gql_generated.PaginatedImagesResult{ Page: &gql_generated.PageInfo{}, @@ -1121,7 +1121,7 @@ func expandedRepoInfo(ctx context.Context, repo string, metaDB mTypes.MetaDB, cv repoMeta, err := metaDB.GetUserRepoMeta(ctx, repo) if err != nil { - log.Error().Err(err).Str("repository", repo).Msg("resolver: can't retrieve repoMeta for repo") + log.Error().Err(err).Str("repository", repo).Msg("resolver: can't retrieve repoMeta for repository") return &gql_generated.RepoInfo{}, err } diff --git a/pkg/extensions/search/search_test.go b/pkg/extensions/search/search_test.go index f704e48cc..4cc712d60 100644 --- a/pkg/extensions/search/search_test.go +++ b/pkg/extensions/search/search_test.go @@ -709,11 +709,12 @@ func TestRepoListWithNewestImage(t *testing.T) { So(found, ShouldBeTrue) So(err, ShouldBeNil) - found, err = readFileAndSearchString(logPath, "updating the CVE database", 2*time.Minute) + found, err = readFileAndSearchString(logPath, "updating cve-db", 2*time.Minute) So(found, ShouldBeTrue) So(err, ShouldBeNil) - found, err = readFileAndSearchString(logPath, "DB update completed, next update scheduled", 4*time.Minute) + found, err = readFileAndSearchString(logPath, + "cve-db update completed, next update scheduled after interval", 4*time.Minute) So(found, ShouldBeTrue) So(err, ShouldBeNil) @@ -3406,11 +3407,11 @@ func TestGlobalSearch(t *testing.T) { So(found, ShouldBeTrue) So(err, ShouldBeNil) - found, err = readFileAndSearchString(logPath, "updating the CVE database", 2*time.Minute) + found, err = readFileAndSearchString(logPath, "updating cve-db", 2*time.Minute) So(found, ShouldBeTrue) So(err, ShouldBeNil) - found, err = readFileAndSearchString(logPath, "DB update completed, next update scheduled", 4*time.Minute) + found, err = readFileAndSearchString(logPath, "cve-db update completed, next update scheduled after interval", 
4*time.Minute) So(found, ShouldBeTrue) So(err, ShouldBeNil) diff --git a/pkg/extensions/sync/content.go b/pkg/extensions/sync/content.go index 7ef2c9eaf..bbe0120a7 100644 --- a/pkg/extensions/sync/content.go +++ b/pkg/extensions/sync/content.go @@ -113,7 +113,7 @@ func (cm ContentManager) getContentByUpstreamRepo(repo string) *syncconf.Content if err != nil { cm.log.Error().Str("errorType", common.TypeOf(err)). Err(err).Str("pattern", - prefix).Msg("error while parsing glob pattern, skipping it...") + prefix).Msg("failed to parse glob pattern, skipping it") continue } diff --git a/pkg/extensions/sync/references/references.go b/pkg/extensions/sync/references/references.go index 9e448100b..539259736 100644 --- a/pkg/extensions/sync/references/references.go +++ b/pkg/extensions/sync/references/references.go @@ -81,7 +81,7 @@ func (refs References) syncAll(ctx context.Context, localRepo, upstreamRepo, for _, ref := range refs.referenceList { syncedRefsDigests, err = ref.SyncReferences(ctx, localRepo, upstreamRepo, subjectDigestStr) if err != nil { - refs.log.Error().Err(err). + refs.log.Warn().Err(err). Str("reference type", ref.Name()). Str("image", fmt.Sprintf("%s:%s", upstreamRepo, subjectDigestStr)). Msg("couldn't sync image referrer") @@ -110,7 +110,7 @@ func (refs References) SyncReference(ctx context.Context, localRepo, upstreamRep if ref.Name() == referenceType { syncedRefsDigests, err = ref.SyncReferences(ctx, localRepo, upstreamRepo, subjectDigestStr) if err != nil { - refs.log.Error().Err(err). + refs.log.Warn().Err(err). Str("reference type", ref.Name()). Str("image", fmt.Sprintf("%s:%s", upstreamRepo, subjectDigestStr)). 
Msg("couldn't sync image referrer") diff --git a/pkg/extensions/sync/service.go b/pkg/extensions/sync/service.go index a121c7cef..f87cca84c 100644 --- a/pkg/extensions/sync/service.go +++ b/pkg/extensions/sync/service.go @@ -125,7 +125,7 @@ func (service *BaseService) SetNextAvailableClient() error { } if err != nil { - service.log.Error().Err(err).Str("url", url).Msg("sync: failed to initialize http client") + service.log.Error().Err(err).Str("url", url).Msg("failed to initialize http client") continue } @@ -184,7 +184,7 @@ func (service *BaseService) GetNextRepo(lastRepo string) (string, error) { return err }, service.retryOptions); err != nil { service.log.Error().Str("errorType", common.TypeOf(err)).Str("remote registry", service.client.GetConfig().URL). - Err(err).Msg("error while getting repositories from remote registry") + Err(err).Msg("failed to get repository list from remote registry") return "", err } @@ -215,15 +215,15 @@ func (service *BaseService) SyncReference(ctx context.Context, repo string, if len(service.config.Content) > 0 { remoteRepo = service.contentManager.GetRepoSource(repo) if remoteRepo == "" { - service.log.Info().Str("remote", remoteURL).Str("repo", repo).Str("subject", subjectDigestStr). + service.log.Info().Str("remote", remoteURL).Str("repository", repo).Str("subject", subjectDigestStr). Str("reference type", referenceType).Msg("will not sync reference for image, filtered out by content") return zerr.ErrSyncImageFilteredOut } } - service.log.Info().Str("remote", remoteURL).Str("repo", repo).Str("subject", subjectDigestStr). - Str("reference type", referenceType).Msg("sync: syncing reference for image") + service.log.Info().Str("remote", remoteURL).Str("repository", repo).Str("subject", subjectDigestStr). 
+ Str("reference type", referenceType).Msg("syncing reference for image") return service.references.SyncReference(ctx, repo, remoteRepo, subjectDigestStr, referenceType) } @@ -237,15 +237,15 @@ func (service *BaseService) SyncImage(ctx context.Context, repo, reference strin if len(service.config.Content) > 0 { remoteRepo = service.contentManager.GetRepoSource(repo) if remoteRepo == "" { - service.log.Info().Str("remote", remoteURL).Str("repo", repo).Str("reference", reference). + service.log.Info().Str("remote", remoteURL).Str("repository", repo).Str("reference", reference). Msg("will not sync image, filtered out by content") return zerr.ErrSyncImageFilteredOut } } - service.log.Info().Str("remote", remoteURL).Str("repo", repo).Str("reference", reference). - Msg("sync: syncing image") + service.log.Info().Str("remote", remoteURL).Str("repository", repo).Str("reference", reference). + Msg("syncing image") manifestDigest, err := service.syncTag(ctx, repo, remoteRepo, reference) if err != nil { @@ -262,8 +262,8 @@ func (service *BaseService) SyncImage(ctx context.Context, repo, reference strin // sync repo periodically. func (service *BaseService) SyncRepo(ctx context.Context, repo string) error { - service.log.Info().Str("repo", repo).Str("registry", service.client.GetConfig().URL). - Msg("sync: syncing repo") + service.log.Info().Str("repository", repo).Str("registry", service.client.GetConfig().URL). + Msg("syncing repo") var err error @@ -274,8 +274,8 @@ func (service *BaseService) SyncRepo(ctx context.Context, repo string) error { return err }, service.retryOptions); err != nil { - service.log.Error().Str("errorType", common.TypeOf(err)).Str("repo", repo). - Err(err).Msg("error while getting tags for repo") + service.log.Error().Str("errorType", common.TypeOf(err)).Str("repository", repo). 
+ Err(err).Msg("failed to get tags for repository") return err } @@ -286,7 +286,7 @@ func (service *BaseService) SyncRepo(ctx context.Context, repo string) error { return err } - service.log.Info().Str("repo", repo).Msgf("sync: syncing tags %v", tags) + service.log.Info().Str("repository", repo).Msgf("syncing tags %v", tags) // apply content.destination rule localRepo := service.contentManager.GetRepoDestination(repo) @@ -314,8 +314,8 @@ func (service *BaseService) SyncRepo(ctx context.Context, repo string) error { continue } - service.log.Error().Str("errorType", common.TypeOf(err)).Str("repo", repo). - Err(err).Msg("error while syncing tags for repo") + service.log.Error().Str("errorType", common.TypeOf(err)).Str("repository", repo). + Err(err).Msg("failed to sync tags for repository") return err } @@ -329,15 +329,15 @@ func (service *BaseService) SyncRepo(ctx context.Context, repo string) error { return err }, service.retryOptions); err != nil { - service.log.Error().Str("errorType", common.TypeOf(err)).Str("repo", repo). - Err(err).Msg("error while syncing tags for repo") + service.log.Error().Str("errorType", common.TypeOf(err)).Str("repository", repo). + Err(err).Msg("failed to sync tags for repository") return err } } } - service.log.Info().Str("repo", repo).Msg("sync: finished syncing repo") + service.log.Info().Str("component", "sync").Str("repository", repo).Msg("finished syncing repository") return nil } @@ -357,14 +357,14 @@ func (service *BaseService) syncTag(ctx context.Context, localRepo, remoteRepo, remoteImageRef, err := service.remote.GetImageReference(remoteRepo, tag) if err != nil { service.log.Error().Err(err).Str("errortype", common.TypeOf(err)). 
- Str("repo", remoteRepo).Str("reference", tag).Msg("couldn't get a remote image reference") + Str("repository", remoteRepo).Str("reference", tag).Msg("couldn't get a remote image reference") return "", err } _, mediaType, manifestDigest, err := service.remote.GetManifestContent(remoteImageRef) if err != nil { - service.log.Error().Err(err).Str("repo", remoteRepo).Str("reference", tag). + service.log.Error().Err(err).Str("repository", remoteRepo).Str("reference", tag). Msg("couldn't get upstream image manifest details") return "", err @@ -389,7 +389,7 @@ func (service *BaseService) syncTag(ctx context.Context, localRepo, remoteRepo, skipImage, err := service.local.CanSkipImage(localRepo, tag, manifestDigest) if err != nil { service.log.Error().Err(err).Str("errortype", common.TypeOf(err)). - Str("repo", localRepo).Str("reference", tag). + Str("repository", localRepo).Str("reference", tag). Msg("couldn't check if the local image can be skipped") } @@ -397,7 +397,7 @@ func (service *BaseService) syncTag(ctx context.Context, localRepo, remoteRepo, localImageRef, err := service.local.GetImageReference(localRepo, tag) if err != nil { service.log.Error().Err(err).Str("errortype", common.TypeOf(err)). - Str("repo", localRepo).Str("reference", tag).Msg("couldn't get a local image reference") + Str("repository", localRepo).Str("reference", tag).Msg("couldn't get a local image reference") return "", err } @@ -417,7 +417,7 @@ func (service *BaseService) syncTag(ctx context.Context, localRepo, remoteRepo, err = service.local.CommitImage(localImageRef, localRepo, tag) if err != nil { service.log.Error().Err(err).Str("errortype", common.TypeOf(err)). 
- Str("repo", localRepo).Str("reference", tag).Msg("couldn't commit image to local image store") + Str("repository", localRepo).Str("reference", tag).Msg("couldn't commit image to local image store") return "", err } @@ -426,7 +426,8 @@ func (service *BaseService) syncTag(ctx context.Context, localRepo, remoteRepo, Msg("skipping image because it's already synced") } - service.log.Info().Str("image", remoteImageRef.DockerReference().String()).Msg("sync: finished syncing image") + service.log.Info().Str("component", "sync"). + Str("image", remoteImageRef.DockerReference().String()).Msg("finished syncing image") return manifestDigest, nil } diff --git a/pkg/log/guidelines.md b/pkg/log/guidelines.md index a01c66075..3d62ee3e9 100644 --- a/pkg/log/guidelines.md +++ b/pkg/log/guidelines.md @@ -61,3 +61,5 @@ For example, lookup a cache (fast path) and it throws a not-found error, and we expect to handle it and perform a slow path lookup. Instead of logging the lookup failure at ERROR level, it may be more appropriate to log at DEBUG level and then handle the error. + +Also, instead of `Msg("error at something")` standardize on `Msg("failed at something")`. 
diff --git a/pkg/meta/boltdb/boltdb.go b/pkg/meta/boltdb/boltdb.go index ca14e456d..9c5d1f30c 100644 --- a/pkg/meta/boltdb/boltdb.go +++ b/pkg/meta/boltdb/boltdb.go @@ -1118,7 +1118,7 @@ func (bdw *BoltDB) SearchRepos(ctx context.Context, searchText string, indexDataMap[indexDigest] = indexData default: - bdw.Log.Error().Str("mediaType", descriptor.MediaType).Msg("Unsupported media type") + bdw.Log.Error().Str("mediaType", descriptor.MediaType).Msg("unsupported media type") continue } @@ -1315,7 +1315,7 @@ func (bdw *BoltDB) FilterTags(ctx context.Context, filterFunc mTypes.FilterFunc, matchedTags[tag] = descriptor } default: - bdw.Log.Error().Str("mediaType", descriptor.MediaType).Msg("Unsupported media type") + bdw.Log.Error().Str("mediaType", descriptor.MediaType).Msg("unsupported media type") continue } @@ -1474,7 +1474,7 @@ func (bdw *BoltDB) SearchTags(ctx context.Context, searchText string, indexDataMap[indexDigest] = indexData default: - bdw.Log.Error().Str("mediaType", descriptor.MediaType).Msg("Unsupported media type") + bdw.Log.Error().Str("mediaType", descriptor.MediaType).Msg("unsupported media type") continue } diff --git a/pkg/meta/dynamodb/dynamodb.go b/pkg/meta/dynamodb/dynamodb.go index d96c2b1df..1e3428acf 100644 --- a/pkg/meta/dynamodb/dynamodb.go +++ b/pkg/meta/dynamodb/dynamodb.go @@ -1000,7 +1000,7 @@ func (dwr *DynamoDB) SearchRepos(ctx context.Context, searchText string, indexDataMap[descriptor.Digest] = indexData default: - dwr.Log.Error().Str("mediaType", descriptor.MediaType).Msg("Unsupported media type") + dwr.Log.Error().Str("mediaType", descriptor.MediaType).Msg("unsupported media type") continue } @@ -1192,7 +1192,7 @@ func (dwr *DynamoDB) FilterTags(ctx context.Context, filterFunc mTypes.FilterFun matchedTags[tag] = descriptor } default: - dwr.Log.Error().Str("mediaType", descriptor.MediaType).Msg("Unsupported media type") + dwr.Log.Error().Str("mediaType", descriptor.MediaType).Msg("unsupported media type") continue } @@ -1341,7 
+1341,7 @@ func (dwr *DynamoDB) SearchTags(ctx context.Context, searchText string, err = json.Unmarshal(indexData.IndexBlob, &indexContent) if err != nil { return []mTypes.RepoMetadata{}, map[string]mTypes.ManifestMetadata{}, map[string]mTypes.IndexData{}, - fmt.Errorf("metadb: error while unmashaling index content for digest %s %w", indexDigest, err) + fmt.Errorf("metadb: error while unmarshaling index content for digest %s %w", indexDigest, err) } for _, manifest := range indexContent.Manifests { @@ -1359,7 +1359,7 @@ func (dwr *DynamoDB) SearchTags(ctx context.Context, searchText string, indexDataMap[indexDigest] = indexData default: - dwr.Log.Error().Str("mediaType", descriptor.MediaType).Msg("Unsupported media type") + dwr.Log.Error().Str("mediaType", descriptor.MediaType).Msg("unsupported media type") continue } diff --git a/pkg/meta/parse.go b/pkg/meta/parse.go index 6613d29b2..687916efc 100644 --- a/pkg/meta/parse.go +++ b/pkg/meta/parse.go @@ -20,13 +20,13 @@ import ( // ParseStorage will sync all repos found in the rootdirectory of the oci layout that zot was deployed on with the // ParseStorage database. func ParseStorage(metaDB mTypes.MetaDB, storeController storage.StoreController, log log.Logger) error { - log.Info().Msg("Started parsing storage and updating MetaDB") + log.Info().Str("component", "metadb").Msg("parsing storage and initializing") allRepos, err := getAllRepos(storeController) if err != nil { rootDir := storeController.DefaultStore.RootDir() - log.Error().Err(err).Str("rootDir", rootDir). - Msg("load-local-layout: failed to get all repo names present under rootDir") + log.Error().Err(err).Str("component", "metadb").Str("rootDir", rootDir). 
+ Msg("failed to get all repo names present under rootDir") return err } @@ -34,13 +34,13 @@ func ParseStorage(metaDB mTypes.MetaDB, storeController storage.StoreController, for _, repo := range allRepos { err := ParseRepo(repo, metaDB, storeController, log) if err != nil { - log.Error().Err(err).Str("repository", repo).Msg("load-local-layout: failed to sync repo") + log.Error().Err(err).Str("component", "metadb").Str("repository", repo).Msg("failed to parse repo") return err } } - log.Info().Msg("Done parsing storage and updating MetaDB") + log.Info().Str("component", "metadb").Msg("successfully initialized") return nil } @@ -56,7 +56,7 @@ func ParseRepo(repo string, metaDB mTypes.MetaDB, storeController storage.StoreC indexBlob, err := imageStore.GetIndexContent(repo) if err != nil { - log.Error().Err(err).Str("repository", repo).Msg("load-repo: failed to read index.json for repo") + log.Error().Err(err).Str("repository", repo).Msg("failed to read index.json for repo") return err } @@ -65,14 +65,14 @@ func ParseRepo(repo string, metaDB mTypes.MetaDB, storeController storage.StoreC err = json.Unmarshal(indexBlob, &indexContent) if err != nil { - log.Error().Err(err).Str("repository", repo).Msg("load-repo: failed to unmarshal index.json for repo") + log.Error().Err(err).Str("repository", repo).Msg("failed to unmarshal index.json for repo") return err } err = resetRepoMeta(repo, metaDB, log) if err != nil && !errors.Is(err, zerr.ErrRepoMetaNotFound) { - log.Error().Err(err).Str("repository", repo).Msg("load-repo: failed to reset tag field in RepoMetadata for repo") + log.Error().Err(err).Str("repository", repo).Msg("failed to reset tag field in RepoMetadata for repo") return err } @@ -86,7 +86,7 @@ func ParseRepo(repo string, metaDB mTypes.MetaDB, storeController storage.StoreC descriptorBlob, err := getCachedBlob(repo, descriptor, metaDB, imageStore, log) if err != nil { - log.Error().Err(err).Msg("load-repo: error checking manifestMeta in MetaDB") + 
log.Error().Err(err).Msg("failed to check manifestMeta in metadb") return err } @@ -95,7 +95,7 @@ func ParseRepo(repo string, metaDB mTypes.MetaDB, storeController storage.StoreC descriptorBlob, tag) if err != nil { log.Error().Err(err).Str("repository", repo).Str("tag", tag). - Msg("load-repo: failed checking if image is signature for specified image") + Msg("failed to check if image is signature for specified image") return err } @@ -116,7 +116,7 @@ func ParseRepo(repo string, metaDB mTypes.MetaDB, storeController storage.StoreC if err != nil { log.Error().Err(err).Str("repository", repo).Str("tag", tag). Str("manifestDigest", signedManifestDigest.String()). - Msg("load-repo: failed set signature meta for signed image") + Msg("failed to set signature meta for signed image") return err } @@ -124,7 +124,7 @@ func ParseRepo(repo string, metaDB mTypes.MetaDB, storeController storage.StoreC err = metaDB.UpdateSignaturesValidity(repo, signedManifestDigest) if err != nil { log.Error().Err(err).Str("repository", repo).Str("reference", tag).Str("digest", signedManifestDigest.String()).Msg( - "load-repo: failed verify signatures validity for signed image") + "failed to verify signatures validity for signed image") return err } @@ -142,7 +142,7 @@ func ParseRepo(repo string, metaDB mTypes.MetaDB, storeController storage.StoreC imageStore, metaDB, log) if err != nil { log.Error().Err(err).Str("repository", repo).Str("tag", tag). 
- Msg("load-repo: failed to set metadata for image") + Msg("failed to set metadata for image") return err } @@ -156,13 +156,13 @@ func ParseRepo(repo string, metaDB mTypes.MetaDB, storeController storage.StoreC func resetRepoMeta(repo string, metaDB mTypes.MetaDB, log log.Logger) error { repoMeta, err := metaDB.GetRepoMeta(repo) if err != nil && !errors.Is(err, zerr.ErrRepoMetaNotFound) { - log.Error().Err(err).Str("repository", repo).Msg("load-repo: failed to get RepoMeta for repo") + log.Error().Err(err).Str("repository", repo).Msg("failed to get RepoMeta for repo") return err } if errors.Is(err, zerr.ErrRepoMetaNotFound) { - log.Info().Str("repository", repo).Msg("load-repo: RepoMeta not found for repo, new RepoMeta will be created") + log.Info().Str("repository", repo).Msg("metadata not found for repo, new metadata will be created") return nil } @@ -208,7 +208,7 @@ func getCachedBlob(repo string, descriptor ispec.Descriptor, metaDB mTypes.MetaD descriptorBlob, _, _, err = imageStore.GetImageManifest(repo, digest.String()) if err != nil { log.Error().Err(err).Str("repository", repo).Str("digest", digest.String()). 
- Msg("load-repo: failed to get blob for image") + Msg("failed to get blob for image") return nil, err } @@ -255,7 +255,7 @@ func getCosignSignatureLayersInfo( var manifestContent ispec.Manifest if err := json.Unmarshal(manifestBlob, &manifestContent); err != nil { log.Error().Err(err).Str("repository", repo).Str("reference", tag).Str("digest", manifestDigest).Msg( - "load-repo: unable to marshal blob index") + "failed to marshal blob index") return layers, err } @@ -269,7 +269,7 @@ func getCosignSignatureLayersInfo( layerContent, err := imageStore.GetBlobContent(repo, layer.Digest) if err != nil { log.Error().Err(err).Str("repository", repo).Str("reference", tag).Str("layerDigest", layer.Digest.String()).Msg( - "load-repo: unable to get cosign signature layer content") + "failed to get cosign signature layer content") return layers, err } @@ -277,7 +277,7 @@ func getCosignSignatureLayersInfo( layerSigKey, ok := layer.Annotations[zcommon.CosignSigKey] if !ok { log.Error().Err(err).Str("repository", repo).Str("reference", tag).Str("layerDigest", layer.Digest.String()).Msg( - "load-repo: unable to get specific annotation of cosign signature") + "failed to get specific annotation of cosign signature") } layers = append(layers, mTypes.LayerInfo{ @@ -298,7 +298,7 @@ func getNotationSignatureLayersInfo( var manifestContent ispec.Manifest if err := json.Unmarshal(manifestBlob, &manifestContent); err != nil { log.Error().Err(err).Str("repository", repo).Str("reference", manifestDigest).Msg( - "load-repo: unable to marshal blob index") + "failed to marshal blob index") return layers, err } @@ -310,7 +310,7 @@ func getNotationSignatureLayersInfo( if len(manifestContent.Layers) != 1 { log.Error().Err(zerr.ErrBadManifest).Str("repository", repo).Str("reference", manifestDigest). 
- Msg("load-repo: notation signature manifest requires exactly one layer but it does not") + Msg("notation signature manifest requires exactly one layer but it does not") return layers, zerr.ErrBadManifest } @@ -325,7 +325,7 @@ func getNotationSignatureLayersInfo( layerContent, err := imageStore.GetBlobContent(repo, layer) if err != nil { log.Error().Err(err).Str("repository", repo).Str("reference", manifestDigest).Str("layerDigest", layer.String()).Msg( - "load-repo: unable to get notation signature blob content") + "failed to get notation signature blob content") return layers, err } diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go index cc771d065..812162c20 100644 --- a/pkg/scheduler/scheduler.go +++ b/pkg/scheduler/scheduler.go @@ -101,13 +101,13 @@ func (scheduler *Scheduler) poolWorker(ctx context.Context, numWorkers int, task for i := 0; i < numWorkers; i++ { go func(workerID int) { for task := range tasks { - scheduler.log.Debug().Int("worker", workerID).Msg("scheduler: starting task") + scheduler.log.Debug().Int("worker", workerID).Msg("starting task") if err := task.DoWork(ctx); err != nil { - scheduler.log.Error().Int("worker", workerID).Err(err).Msg("scheduler: error while executing task") + scheduler.log.Error().Int("worker", workerID).Err(err).Msg("failed to execute task") } - scheduler.log.Debug().Int("worker", workerID).Msg("scheduler: finished task") + scheduler.log.Debug().Int("worker", workerID).Msg("finished task") } }(i + 1) } @@ -129,7 +129,7 @@ func (scheduler *Scheduler) RunScheduler(ctx context.Context) { close(tasksWorker) close(scheduler.stopCh) - scheduler.log.Debug().Msg("scheduler: received stop signal, exiting...") + scheduler.log.Debug().Msg("received stop signal, exiting...") return default: @@ -138,7 +138,7 @@ func (scheduler *Scheduler) RunScheduler(ctx context.Context) { task := scheduler.getTask() if task != nil { // push tasks into worker pool - scheduler.log.Debug().Msg("scheduler: pushing task into worker 
pool") + scheduler.log.Debug().Msg("pushing task into worker pool") tasksWorker <- task } i++ @@ -162,7 +162,7 @@ func (scheduler *Scheduler) pushReadyGenerators() { scheduler.waitingGenerators = append(scheduler.waitingGenerators[:i], scheduler.waitingGenerators[i+1:]...) modified = true - scheduler.log.Debug().Msg("scheduler: waiting generator is ready, pushing to ready generators") + scheduler.log.Debug().Msg("waiting generator is ready, pushing to ready generators") break } @@ -261,7 +261,7 @@ func (scheduler *Scheduler) SubmitTask(task Task, priority Priority) { case <-scheduler.stopCh: return case tasksQ <- task: - scheduler.log.Info().Msg("scheduler: adding a new task") + scheduler.log.Info().Msg("adding a new task") } } @@ -308,7 +308,7 @@ func (gen *generator) generate(sch *Scheduler) { if gen.remainingTask == nil { nextTask, err := gen.taskGenerator.Next() if err != nil { - sch.log.Error().Err(err).Msg("scheduler: error while executing generator") + sch.log.Error().Err(err).Msg("failed to execute generator") return } diff --git a/pkg/scheduler/scheduler_test.go b/pkg/scheduler/scheduler_test.go index 8cb59b74e..283ab96a8 100644 --- a/pkg/scheduler/scheduler_test.go +++ b/pkg/scheduler/scheduler_test.go @@ -150,7 +150,7 @@ func TestScheduler(t *testing.T) { So(string(data), ShouldContainSubstring, "executing high priority task; index: 1") So(string(data), ShouldContainSubstring, "executing high priority task; index: 2") So(string(data), ShouldNotContainSubstring, "executing medium priority task; index: 1") - So(string(data), ShouldNotContainSubstring, "error while executing task") + So(string(data), ShouldNotContainSubstring, "failed to execute task") }) Convey("Test task returning an error", t, func() { @@ -173,8 +173,8 @@ func TestScheduler(t *testing.T) { data, err := os.ReadFile(logFile.Name()) So(err, ShouldBeNil) - So(string(data), ShouldContainSubstring, "scheduler: adding a new task") - So(string(data), ShouldContainSubstring, "error while 
executing task") + So(string(data), ShouldContainSubstring, "adding a new task") + So(string(data), ShouldContainSubstring, "failed to execute task") }) Convey("Test resubmit generator", t, func() { @@ -215,7 +215,7 @@ func TestScheduler(t *testing.T) { data, err := os.ReadFile(logFile.Name()) So(err, ShouldBeNil) - So(string(data), ShouldNotContainSubstring, "scheduler: adding a new task") + So(string(data), ShouldNotContainSubstring, "adding a new task") }) Convey("Test adding a new task when context is done", t, func() { @@ -238,7 +238,7 @@ func TestScheduler(t *testing.T) { data, err := os.ReadFile(logFile.Name()) So(err, ShouldBeNil) - So(string(data), ShouldNotContainSubstring, "scheduler: adding a new task") + So(string(data), ShouldNotContainSubstring, "adding a new task") }) } diff --git a/pkg/storage/common/common.go b/pkg/storage/common/common.go index 533c078f5..fe8364e96 100644 --- a/pkg/storage/common/common.go +++ b/pkg/storage/common/common.go @@ -82,7 +82,7 @@ func ValidateManifest(imgStore storageTypes.ImageStore, repo, reference, mediaTy // validate manifest if err := ValidateManifestSchema(body); err != nil { - log.Error().Err(err).Msg("OCIv1 image manifest schema validation failed") + log.Error().Err(err).Msg("failed to validate OCIv1 image manifest schema") return "", zerr.NewError(zerr.ErrBadManifest).AddDetail("jsonSchemaValidation", err.Error()) } @@ -131,7 +131,7 @@ func ValidateManifest(imgStore storageTypes.ImageStore, repo, reference, mediaTy case ispec.MediaTypeImageIndex: // validate manifest if err := ValidateImageIndexSchema(body); err != nil { - log.Error().Err(err).Msg("OCIv1 image index manifest schema validation failed") + log.Error().Err(err).Msg("failed to validate OCIv1 image index manifest schema") return "", zerr.NewError(zerr.ErrBadManifest).AddDetail("jsonSchemaValidation", err.Error()) } diff --git a/pkg/storage/imagestore/imagestore.go b/pkg/storage/imagestore/imagestore.go index a65762c10..ec78e545c 100644 --- 
a/pkg/storage/imagestore/imagestore.go +++ b/pkg/storage/imagestore/imagestore.go @@ -67,7 +67,7 @@ func NewImageStore(rootDir string, cacheDir string, dedupe, commit bool, log zlo metrics monitoring.MetricServer, linter common.Lint, storeDriver storageTypes.Driver, cacheDriver cache.Cache, ) storageTypes.ImageStore { if err := storeDriver.EnsureDir(rootDir); err != nil { - log.Error().Err(err).Str("rootDir", rootDir).Msg("unable to create root dir") + log.Error().Err(err).Str("rootDir", rootDir).Msg("failed to create root dir") return nil } @@ -139,14 +139,14 @@ func (is *ImageStore) initRepo(name string) error { // create "blobs" subdir err := is.storeDriver.EnsureDir(path.Join(repoDir, "blobs")) if err != nil { - is.log.Error().Err(err).Msg("error creating blobs subdir") + is.log.Error().Err(err).Str("repository", repoDir).Msg("failed to create blobs subdir") return err } // create BlobUploadDir subdir err = is.storeDriver.EnsureDir(path.Join(repoDir, storageConstants.BlobUploadDir)) if err != nil { - is.log.Error().Err(err).Msg("error creating blob upload subdir") + is.log.Error().Err(err).Msg("failed to create blob upload subdir") return err } @@ -158,13 +158,13 @@ func (is *ImageStore) initRepo(name string) error { buf, err := json.Marshal(il) if err != nil { - is.log.Error().Err(err).Msg("unable to marshal JSON") + is.log.Error().Err(err).Msg("failed to marshal JSON") return err } if _, err := is.storeDriver.WriteFile(ilPath, buf); err != nil { - is.log.Error().Err(err).Str("file", ilPath).Msg("unable to write file") + is.log.Error().Err(err).Str("file", ilPath).Msg("failed to write file") return err } @@ -178,13 +178,13 @@ func (is *ImageStore) initRepo(name string) error { buf, err := json.Marshal(index) if err != nil { - is.log.Error().Err(err).Msg("unable to marshal JSON") + is.log.Error().Err(err).Msg("failed to marshal JSON") return err } if _, err := is.storeDriver.WriteFile(indexPath, buf); err != nil { - is.log.Error().Err(err).Str("file", 
ilPath).Msg("unable to write file") + is.log.Error().Err(err).Str("file", ilPath).Msg("failed to write file") return err } @@ -220,7 +220,7 @@ func (is *ImageStore) ValidateRepo(name string) (bool, error) { files, err := is.storeDriver.List(dir) if err != nil { - is.log.Error().Err(err).Str("dir", dir).Msg("unable to read directory") + is.log.Error().Err(err).Str("dir", dir).Msg("failed to read directory") return false, zerr.ErrRepoNotFound } @@ -565,7 +565,7 @@ func (is *ImageStore) PutImageManifest(repo, reference, mediaType string, //noli manifestPath := path.Join(dir, mDigest.Encoded()) if _, err = is.storeDriver.WriteFile(manifestPath, body); err != nil { - is.log.Error().Err(err).Str("file", manifestPath).Msg("unable to write") + is.log.Error().Err(err).Str("file", manifestPath).Msg("failed to write") return "", "", err } @@ -658,7 +658,7 @@ func (is *ImageStore) deleteImageManifest(repo, reference string, detectCollisio } if _, err := is.storeDriver.WriteFile(file, buf); err != nil { - is.log.Debug().Str("reference", reference).Str("repo", repo).Msg("error while updating index.json") + is.log.Debug().Str("reference", reference).Str("repository", repo).Msg("failed to update index.json") return err } @@ -698,7 +698,7 @@ func (is *ImageStore) BlobUploadPath(repo, uuid string) string { // NewBlobUpload returns the unique ID for an upload in progress. 
func (is *ImageStore) NewBlobUpload(repo string) (string, error) { if err := is.InitRepo(repo); err != nil { - is.log.Error().Err(err).Msg("error initializing repo") + is.log.Error().Err(err).Msg("failed to initialize repo") return "", err } @@ -889,7 +889,7 @@ func (is *ImageStore) FinishBlobUpload(repo, uuid string, body io.Reader, dstDig err = is.storeDriver.EnsureDir(dir) if err != nil { - is.log.Error().Err(err).Msg("error creating blobs/sha256 dir") + is.log.Error().Err(err).Str("dir", dir).Msg("failed to create dir") return err } @@ -905,14 +905,14 @@ func (is *ImageStore) FinishBlobUpload(repo, uuid string, body io.Reader, dstDig err = is.DedupeBlob(src, dstDigest, dst) if err := inject.Error(err); err != nil { is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()). - Str("dst", dst).Msg("unable to dedupe blob") + Str("dst", dst).Msg("failed to dedupe blob") return err } } else { if err := is.storeDriver.Move(src, dst); err != nil { is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()). - Str("dst", dst).Msg("unable to finish blob") + Str("dst", dst).Msg("failed to finish blob") return err } @@ -983,14 +983,14 @@ func (is *ImageStore) FullBlobUpload(repo string, body io.Reader, dstDigest godi if is.dedupe && fmt.Sprintf("%v", is.cache) != fmt.Sprintf("%v", nil) { if err := is.DedupeBlob(src, dstDigest, dst); err != nil { is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()). - Str("dst", dst).Msg("unable to dedupe blob") + Str("dst", dst).Msg("failed to dedupe blob") return "", -1, err } } else { if err := is.storeDriver.Move(src, dst); err != nil { is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()). 
- Str("dst", dst).Msg("unable to finish blob") + Str("dst", dst).Msg("failed to finish blob") return "", -1, err } @@ -1005,7 +1005,7 @@ retry: dstRecord, err := is.cache.GetBlob(dstDigest) if err := inject.Error(err); err != nil && !errors.Is(err, zerr.ErrCacheMiss) { - is.log.Error().Err(err).Str("blobPath", dst).Msg("dedupe: unable to lookup blob record") + is.log.Error().Err(err).Str("blobPath", dst).Msg("dedupe: failed to lookup blob record") return err } @@ -1013,14 +1013,14 @@ retry: if dstRecord == "" { // cache record doesn't exist, so first disk and cache entry for this digest if err := is.cache.PutBlob(dstDigest, dst); err != nil { - is.log.Error().Err(err).Str("blobPath", dst).Msg("dedupe: unable to insert blob record") + is.log.Error().Err(err).Str("blobPath", dst).Msg("dedupe: failed to insert blob record") return err } // move the blob from uploads to final dest if err := is.storeDriver.Move(src, dst); err != nil { - is.log.Error().Err(err).Str("src", src).Str("dst", dst).Msg("dedupe: unable to rename blob") + is.log.Error().Err(err).Str("src", src).Str("dst", dst).Msg("dedupe: failed to rename blob") return err } @@ -1036,12 +1036,12 @@ retry: _, err := is.storeDriver.Stat(dstRecord) if err != nil { - is.log.Error().Err(err).Str("blobPath", dstRecord).Msg("dedupe: unable to stat") + is.log.Error().Err(err).Str("blobPath", dstRecord).Msg("dedupe: failed to stat") // the actual blob on disk may have been removed by GC, so sync the cache err := is.cache.DeleteBlob(dstDigest, dstRecord) if err = inject.Error(err); err != nil { //nolint:lll - is.log.Error().Err(err).Str("dstDigest", dstDigest.String()).Str("dst", dst).Msg("dedupe: unable to delete blob record") + is.log.Error().Err(err).Str("dstDigest", dstDigest.String()).Str("dst", dst).Msg("dedupe: failed to delete blob record") return err } @@ -1052,13 +1052,13 @@ retry: // prevent overwrite original blob if !is.storeDriver.SameFile(dst, dstRecord) { if err := is.storeDriver.Link(dstRecord, dst); err 
!= nil { - is.log.Error().Err(err).Str("blobPath", dstRecord).Msg("dedupe: unable to link blobs") + is.log.Error().Err(err).Str("blobPath", dstRecord).Msg("dedupe: failed to link blobs") return err } if err := is.cache.PutBlob(dstDigest, dst); err != nil { - is.log.Error().Err(err).Str("blobPath", dst).Msg("dedupe: unable to insert blob record") + is.log.Error().Err(err).Str("blobPath", dst).Msg("dedupe: failed to insert blob record") return err } @@ -1066,7 +1066,7 @@ retry: // remove temp blobupload if err := is.storeDriver.Delete(src); err != nil { - is.log.Error().Err(err).Str("src", src).Msg("dedupe: unable to remove blob") + is.log.Error().Err(err).Str("src", src).Msg("dedupe: failed to remove blob") return err } @@ -1093,7 +1093,7 @@ func (is *ImageStore) DeleteBlobUpload(repo, uuid string) error { defer writer.Close() if err := writer.Cancel(); err != nil { - is.log.Error().Err(err).Str("blobUploadPath", blobUploadPath).Msg("error deleting blob upload") + is.log.Error().Err(err).Str("blobUploadPath", blobUploadPath).Msg("failed deleting blob upload") return err } @@ -1139,7 +1139,7 @@ func (is *ImageStore) CheckBlob(repo string, digest godigest.Digest) (bool, int6 // Check blobs in cache dstRecord, err := is.checkCacheBlob(digest) if err != nil { - is.log.Error().Err(err).Str("digest", digest.String()).Msg("cache: not found") + is.log.Warn().Err(err).Str("digest", digest.String()).Msg("not found in cache") return false, -1, zerr.ErrBlobNotFound } @@ -1151,7 +1151,7 @@ func (is *ImageStore) CheckBlob(repo string, digest godigest.Digest) (bool, int6 // put deduped blob in cache if err := is.cache.PutBlob(digest, blobPath); err != nil { - is.log.Error().Err(err).Str("blobPath", blobPath).Msg("dedupe: unable to insert blob record") + is.log.Error().Err(err).Str("blobPath", blobPath).Msg("dedupe: failed to insert blob record") return false, -1, err } @@ -1185,7 +1185,7 @@ func (is *ImageStore) StatBlob(repo string, digest godigest.Digest) (bool, int64 // Check 
blobs in cache dstRecord, err := is.checkCacheBlob(digest) if err != nil { - is.log.Error().Err(err).Str("digest", digest.String()).Msg("cache: not found") + is.log.Warn().Err(err).Str("digest", digest.String()).Msg("not found in cache") return false, -1, time.Time{}, zerr.ErrBlobNotFound } @@ -1224,7 +1224,7 @@ func (is *ImageStore) checkCacheBlob(digest godigest.Digest) (string, error) { // the actual blob on disk may have been removed by GC, so sync the cache if err := is.cache.DeleteBlob(digest, dstRecord); err != nil { is.log.Error().Err(err).Str("digest", digest.String()).Str("blobPath", dstRecord). - Msg("unable to remove blob path from cache") + Msg("failed to remove blob path from cache") return "", err } @@ -1239,7 +1239,7 @@ func (is *ImageStore) checkCacheBlob(digest godigest.Digest) (string, error) { func (is *ImageStore) copyBlob(repo string, blobPath, dstRecord string) (int64, error) { if err := is.initRepo(repo); err != nil { - is.log.Error().Err(err).Str("repository", repo).Msg("unable to initialize an empty repo") + is.log.Error().Err(err).Str("repository", repo).Msg("failed to initialize an empty repo") return -1, err } @@ -1247,7 +1247,7 @@ func (is *ImageStore) copyBlob(repo string, blobPath, dstRecord string) (int64, _ = is.storeDriver.EnsureDir(filepath.Dir(blobPath)) if err := is.storeDriver.Link(dstRecord, blobPath); err != nil { - is.log.Error().Err(err).Str("blobPath", blobPath).Str("link", dstRecord).Msg("dedupe: unable to hard link") + is.log.Error().Err(err).Str("blobPath", blobPath).Str("link", dstRecord).Msg("dedupe: failed to hard link") return -1, zerr.ErrBlobNotFound } @@ -1288,7 +1288,7 @@ func (is *ImageStore) GetBlobPartial(repo string, digest godigest.Digest, mediaT // Check blobs in cache blobPath, err = is.checkCacheBlob(digest) if err != nil { - is.log.Error().Err(err).Str("digest", digest.String()).Msg("cache: not found") + is.log.Warn().Err(err).Str("digest", digest.String()).Msg("not found in cache") return nil, -1, -1, 
zerr.ErrBlobNotFound } @@ -1358,7 +1358,7 @@ func (is *ImageStore) GetBlob(repo string, digest godigest.Digest, mediaType str // Check blobs in cache dstRecord, err := is.checkCacheBlob(digest) if err != nil { - is.log.Error().Err(err).Str("digest", digest.String()).Msg("cache: not found") + is.log.Warn().Err(err).Str("digest", digest.String()).Msg("not found in cache") return nil, -1, zerr.ErrBlobNotFound } @@ -1411,7 +1411,7 @@ func (is *ImageStore) GetBlobContent(repo string, digest godigest.Digest) ([]byt // Check blobs in cache dstRecord, err := is.checkCacheBlob(digest) if err != nil { - is.log.Error().Err(err).Str("digest", digest.String()).Msg("cache: not found") + is.log.Warn().Err(err).Str("digest", digest.String()).Msg("not found in cache") return nil, zerr.ErrBlobNotFound } @@ -1476,13 +1476,13 @@ func (is *ImageStore) PutIndexContent(repo string, index ispec.Index) error { buf, err := json.Marshal(index) if err != nil { - is.log.Error().Err(err).Str("file", indexPath).Msg("unable to marshal JSON") + is.log.Error().Err(err).Str("file", indexPath).Msg("failed to marshal JSON") return err } if _, err = is.storeDriver.WriteFile(indexPath, buf); err != nil { - is.log.Error().Err(err).Str("file", indexPath).Msg("unable to write") + is.log.Error().Err(err).Str("file", indexPath).Msg("failed to write") return err } @@ -1521,14 +1521,14 @@ func (is *ImageStore) CleanupRepo(repo string, blobs []godigest.Digest, removeRe continue } - is.log.Error().Err(err).Str("repository", repo).Str("digest", digest.String()).Msg("unable to delete manifest") + is.log.Error().Err(err).Str("repository", repo).Str("digest", digest.String()).Msg("failed to delete manifest") return count, err } count++ } else { - is.log.Error().Err(err).Str("repository", repo).Str("digest", digest.String()).Msg("unable to delete blob") + is.log.Error().Err(err).Str("repository", repo).Str("digest", digest.String()).Msg("failed to delete blob") return count, err } @@ -1539,13 +1539,13 @@ func (is 
*ImageStore) CleanupRepo(repo string, blobs []godigest.Digest, removeRe blobUploads, err := is.storeDriver.List(path.Join(is.RootDir(), repo, storageConstants.BlobUploadDir)) if err != nil { - is.log.Debug().Str("repository", repo).Msg("unable to list .uploads/ dir") + is.log.Debug().Str("repository", repo).Msg("failed to list .uploads/ dir") } // if removeRepo flag is true and we cleanup all blobs and there are no blobs currently being uploaded. if removeRepo && count == len(blobs) && count > 0 && len(blobUploads) == 0 { if err := is.storeDriver.Delete(path.Join(is.rootDir, repo)); err != nil { - is.log.Error().Err(err).Str("repository", repo).Msg("unable to remove repo") + is.log.Error().Err(err).Str("repository", repo).Msg("failed to remove repo") return count, err } @@ -1572,7 +1572,7 @@ func (is *ImageStore) deleteBlob(repo string, digest godigest.Digest) error { if fmt.Sprintf("%v", is.cache) != fmt.Sprintf("%v", nil) { dstRecord, err := is.cache.GetBlob(digest) if err != nil && !errors.Is(err, zerr.ErrCacheMiss) { - is.log.Error().Err(err).Str("blobPath", dstRecord).Msg("dedupe: unable to lookup blob record") + is.log.Error().Err(err).Str("blobPath", dstRecord).Msg("dedupe: failed to lookup blob record") return err } @@ -1581,7 +1581,7 @@ func (is *ImageStore) deleteBlob(repo string, digest godigest.Digest) error { if ok := is.cache.HasBlob(digest, blobPath); ok { if err := is.cache.DeleteBlob(digest, blobPath); err != nil { is.log.Error().Err(err).Str("digest", digest.String()).Str("blobPath", blobPath). 
- Msg("unable to remove blob path from cache") + Msg("failed to remove blob path from cache") return err } @@ -1592,7 +1592,7 @@ func (is *ImageStore) deleteBlob(repo string, digest godigest.Digest) error { // get next candidate dstRecord, err := is.cache.GetBlob(digest) if err != nil && !errors.Is(err, zerr.ErrCacheMiss) { - is.log.Error().Err(err).Str("blobPath", dstRecord).Msg("dedupe: unable to lookup blob record") + is.log.Error().Err(err).Str("blobPath", dstRecord).Msg("dedupe: failed to lookup blob record") return err } @@ -1610,7 +1610,7 @@ func (is *ImageStore) deleteBlob(repo string, digest godigest.Digest) error { if binfo.Size() == 0 { if err := is.storeDriver.Move(blobPath, dstRecord); err != nil { - is.log.Error().Err(err).Str("blobPath", blobPath).Msg("unable to remove blob path") + is.log.Error().Err(err).Str("blobPath", blobPath).Msg("failed to remove blob path") return err } @@ -1622,7 +1622,7 @@ func (is *ImageStore) deleteBlob(repo string, digest godigest.Digest) error { } if err := is.storeDriver.Delete(blobPath); err != nil { - is.log.Error().Err(err).Str("blobPath", blobPath).Msg("unable to remove blob path") + is.log.Error().Err(err).Str("blobPath", blobPath).Msg("failed to remove blob path") return err } @@ -1732,7 +1732,7 @@ func (is *ImageStore) getOriginalBlob(digest godigest.Digest, duplicateBlobs []s originalBlob, err = is.checkCacheBlob(digest) if err != nil && !errors.Is(err, zerr.ErrBlobNotFound) && !errors.Is(err, zerr.ErrCacheMiss) { - is.log.Error().Err(err).Msg("rebuild dedupe: unable to find blob in cache") + is.log.Error().Err(err).Msg("rebuild dedupe: failed to find blob in cache") return originalBlob, err } @@ -1781,7 +1781,7 @@ func (is *ImageStore) dedupeBlobs(digest godigest.Digest, duplicateBlobs []strin if originalBlob == "" { originalBlob, err = is.getOriginalBlob(digest, duplicateBlobs) if err != nil { - is.log.Error().Err(err).Msg("rebuild dedupe: unable to find original blob") + is.log.Error().Err(err).Msg("rebuild 
dedupe: failed to find original blob") return zerr.ErrDedupeRebuild } @@ -1804,7 +1804,7 @@ func (is *ImageStore) dedupeBlobs(digest godigest.Digest, duplicateBlobs []strin // if we have an original blob cached then we can safely dedupe the rest of them if originalBlob != "" { if err := is.storeDriver.Link(originalBlob, blobPath); err != nil { - is.log.Error().Err(err).Str("path", blobPath).Msg("rebuild dedupe: unable to dedupe blob") + is.log.Error().Err(err).Str("path", blobPath).Msg("rebuild dedupe: failed to dedupe blob") return err } @@ -1833,7 +1833,7 @@ func (is *ImageStore) restoreDedupedBlobs(digest godigest.Digest, duplicateBlobs // first we need to find the original blob, either in cache or by checking each blob size originalBlob, err := is.getOriginalBlob(digest, duplicateBlobs) if err != nil { - is.log.Error().Err(err).Msg("rebuild dedupe: unable to find original blob") + is.log.Error().Err(err).Msg("rebuild dedupe: failed to find original blob") return zerr.ErrDedupeRebuild } diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go index 56da20b74..9e1617880 100644 --- a/pkg/storage/storage.go +++ b/pkg/storage/storage.go @@ -64,7 +64,7 @@ func New(config *config.Config, linter common.Lint, metrics monitoring.MetricSer // Init a Storager from connection string. store, err := factory.Create(storeName, config.Storage.StorageDriver) if err != nil { - log.Error().Err(err).Str("rootDir", config.Storage.RootDirectory).Msg("unable to create s3 service") + log.Error().Err(err).Str("rootDir", config.Storage.RootDirectory).Msg("failed to create s3 service") return storeController, err } @@ -168,7 +168,7 @@ func getSubStore(cfg *config.Config, subPaths map[string]config.StorageConfig, // Init a Storager from connection string. 
store, err := factory.Create(storeName, storageConfig.StorageDriver) if err != nil { - log.Error().Err(err).Str("rootDir", storageConfig.RootDirectory).Msg("Unable to create s3 service") + log.Error().Err(err).Str("rootDir", storageConfig.RootDirectory).Msg("failed to create s3 service") return nil, err } diff --git a/pkg/test/oci-utils/oci_layout.go b/pkg/test/oci-utils/oci_layout.go index 7d3528a5c..3a11d79cc 100644 --- a/pkg/test/oci-utils/oci_layout.go +++ b/pkg/test/oci-utils/oci_layout.go @@ -566,7 +566,7 @@ func (olu BaseOciLayoutUtils) ExtractImageDetails( ) { manifest, dig, err := olu.GetImageManifest(repo, tag) if err != nil { - log.Error().Err(err).Msg("Could not retrieve image ispec manifest") + log.Error().Err(err).Msg("failed to retrieve image manifest") return "", nil, nil, err } @@ -575,7 +575,7 @@ func (olu BaseOciLayoutUtils) ExtractImageDetails( imageConfig, err := olu.GetImageConfigInfo(repo, digest) if err != nil { - log.Error().Err(err).Msg("Could not retrieve image config") + log.Error().Err(err).Msg("failed to retrieve image config") return "", nil, nil, err }