From 7ea00c5d23845f7af73843611db44f708599afe3 Mon Sep 17 00:00:00 2001 From: Ramkumar Chinchani Date: Fri, 6 Oct 2023 22:01:29 +0000 Subject: [PATCH] fix(log): refactor some logs Signed-off-by: Ramkumar Chinchani --- .github/workflows/golangci-lint.yaml | 10 +++ pkg/api/routes.go | 4 - pkg/api/session.go | 4 +- pkg/cli/server/root.go | 22 ++--- pkg/exporter/api/exporter.go | 14 ++-- pkg/exporter/cli/cli.go | 14 +++- pkg/extensions/extension_metrics.go | 4 +- pkg/extensions/extension_scrub.go | 2 +- pkg/extensions/extension_search.go | 4 +- pkg/extensions/extension_sync.go | 2 +- pkg/extensions/monitoring/minimal.go | 8 +- pkg/extensions/search/cve/cve.go | 4 +- pkg/extensions/search/cve/scan.go | 14 ++-- pkg/extensions/search/cve/trivy/scanner.go | 14 ++-- pkg/extensions/sync/references/references.go | 4 +- pkg/log/guidelines.md | 2 + pkg/meta/boltdb/boltdb.go | 4 +- pkg/meta/parse.go | 44 +++++----- pkg/scheduler/scheduler.go | 16 ++-- pkg/storage/common/common.go | 4 +- pkg/storage/imagestore/imagestore.go | 88 ++++++++++---------- pkg/storage/storage.go | 4 +- 22 files changed, 151 insertions(+), 135 deletions(-) diff --git a/.github/workflows/golangci-lint.yaml b/.github/workflows/golangci-lint.yaml index 631bcd7d65..a5db08b1a1 100644 --- a/.github/workflows/golangci-lint.yaml +++ b/.github/workflows/golangci-lint.yaml @@ -44,3 +44,13 @@ jobs: - name: Run linter from make target run: | make check + - name: Run log linter + run: | + set +e + + # log messages should never start upper-cased + find . -name '*.go' | grep -v '_test.go' | xargs grep -n "Msg(\"[A-Z]" + if [ $? 
-eq 0 ]; then + exit 1 + fi + exit 0 diff --git a/pkg/api/routes.go b/pkg/api/routes.go index 1db9a8044b..ac4941d0fb 100644 --- a/pkg/api/routes.go +++ b/pkg/api/routes.go @@ -1221,8 +1221,6 @@ func (rh *RouteHandler) CreateBlobUpload(response http.ResponseWriter, request * return } - rh.c.Log.Info().Int64("r.ContentLength", request.ContentLength).Msg("DEBUG") - digestStr := digests[0] digest := godigest.Digest(digestStr) @@ -1494,8 +1492,6 @@ func (rh *RouteHandler) UpdateBlobUpload(response http.ResponseWriter, request * return } - rh.c.Log.Info().Int64("r.ContentLength", request.ContentLength).Msg("DEBUG") - contentPresent := true contentLen, err := strconv.ParseInt(request.Header.Get("Content-Length"), 10, 64) diff --git a/pkg/api/session.go b/pkg/api/session.go index c5f46f2261..35b32682d8 100644 --- a/pkg/api/session.go +++ b/pkg/api/session.go @@ -121,7 +121,8 @@ func SessionLogger(ctlr *Controller) mux.MiddlewareFunc { monitoring.ObserveHTTPMethodLatency(ctlr.Metrics, method, latency) // histogram } - log.Str("clientIP", clientIP). + log.Str("component", "session"). + Str("clientIP", clientIP). Str("method", method). Str("path", path). Int("statusCode", statusCode). @@ -172,6 +173,7 @@ func SessionAuditLogger(audit *log.Logger) mux.MiddlewareFunc { method == http.MethodPatch || method == http.MethodDelete) && (statusCode == http.StatusOK || statusCode == http.StatusCreated || statusCode == http.StatusAccepted) { audit.Info(). + Str("component", "session"). Str("clientIP", clientIP). Str("subject", username). Str("action", method). 
diff --git a/pkg/cli/server/root.go b/pkg/cli/server/root.go index c4088fa806..689b3d5698 100644 --- a/pkg/cli/server/root.go +++ b/pkg/cli/server/root.go @@ -71,7 +71,7 @@ func newServeCmd(conf *config.Config) *cobra.Command { } if err := ctlr.Run(reloaderCtx); err != nil { - ctlr.Log.Fatal().Err(err).Msg("unable to start controller, exiting") + ctlr.Log.Fatal().Err(err).Msg("failed to start controller, exiting") } }, } @@ -105,15 +105,15 @@ func newScrubCmd(conf *config.Config) *cobra.Command { fmt.Sprintf("http://%s/v2", net.JoinHostPort(conf.HTTP.Address, conf.HTTP.Port)), nil) if err != nil { - log.Error().Err(err).Msg("unable to create a new http request") + log.Error().Err(err).Msg("failed to create a new http request") panic(err) } response, err := http.DefaultClient.Do(req) if err == nil { response.Body.Close() - log.Warn().Msg("The server is running, in order to perform the scrub command the server should be shut down") - panic("Error: server is running") + log.Warn().Msg("server is running, in order to perform the scrub command the server should be shut down") + panic("server is running") } else { // server is down ctlr := api.NewController(conf) @@ -146,11 +146,11 @@ func newVerifyCmd(conf *config.Config) *cobra.Command { Run: func(cmd *cobra.Command, args []string) { if len(args) > 0 { if err := LoadConfiguration(conf, args[0]); err != nil { - log.Error().Str("config", args[0]).Msg("Config file is invalid") + log.Error().Str("config", args[0]).Msg("config file is invalid") panic(err) } - log.Info().Str("config", args[0]).Msg("Config file is valid") + log.Info().Str("config", args[0]).Msg("config file is valid") } }, } @@ -291,11 +291,11 @@ func validateCacheConfig(cfg *config.Config, log zlog.Logger) error { func validateExtensionsConfig(cfg *config.Config, log zlog.Logger) error { if cfg.Extensions != nil && cfg.Extensions.Mgmt != nil { - log.Warn().Msg("The mgmt extensions configuration option has been made redundant and will be ignored.") + 
log.Warn().Msg("mgmt extensions configuration option has been made redundant and will be ignored.") } if cfg.Extensions != nil && cfg.Extensions.APIKey != nil { - log.Warn().Msg("The apikey extension configuration will be ignored as API keys " + + log.Warn().Msg("apikey extension configuration will be ignored as API keys " + "are now configurable in the HTTP settings.") } @@ -303,7 +303,7 @@ func validateExtensionsConfig(cfg *config.Config, log zlog.Logger) error { // it would make sense to also check for mgmt and user prefs to be enabled, // but those are both enabled by having the search and ui extensions enabled if cfg.Extensions.Search == nil || !*cfg.Extensions.Search.Enable { - log.Error().Err(zerr.ErrBadConfig).Msg("UI functionality can't be used without search extension.") + log.Error().Err(zerr.ErrBadConfig).Msg("ui functionality can't be used without search extension.") return zerr.ErrBadConfig } @@ -312,7 +312,7 @@ func validateExtensionsConfig(cfg *config.Config, log zlog.Logger) error { //nolint:lll if cfg.Storage.StorageDriver != nil && cfg.Extensions != nil && cfg.Extensions.Search != nil && cfg.Extensions.Search.Enable != nil && *cfg.Extensions.Search.Enable && cfg.Extensions.Search.CVE != nil { - log.Error().Err(zerr.ErrBadConfig).Msg("CVE functionality can't be used with remote storage. Please disable CVE") + log.Error().Err(zerr.ErrBadConfig).Msg("cve functionality can't be used with remote storage. Please disable cve") return zerr.ErrBadConfig } @@ -321,7 +321,7 @@ func validateExtensionsConfig(cfg *config.Config, log zlog.Logger) error { //nolint:lll if subPath.StorageDriver != nil && cfg.Extensions != nil && cfg.Extensions.Search != nil && cfg.Extensions.Search.Enable != nil && *cfg.Extensions.Search.Enable && cfg.Extensions.Search.CVE != nil { - log.Error().Err(zerr.ErrBadConfig).Msg("CVE functionality can't be used with remote storage. 
Please disable CVE") + log.Error().Err(zerr.ErrBadConfig).Msg("cve functionality can't be used with remote storage. Please disable cve") return zerr.ErrBadConfig } diff --git a/pkg/exporter/api/exporter.go b/pkg/exporter/api/exporter.go index 59a8991e4d..16dae68c6b 100644 --- a/pkg/exporter/api/exporter.go +++ b/pkg/exporter/api/exporter.go @@ -101,7 +101,7 @@ func (zc Collector) Collect(ch chan<- prometheus.Metric) { func panicOnDuplicateMetricName(m map[string]*prometheus.Desc, name string, log log.Logger) { if _, present := m[name]; present { - log.Fatal().Msg("Duplicate keys: metric " + name + " already present") + log.Fatal().Str("metric", name).Msg("duplicate key found") } } @@ -180,16 +180,16 @@ func runExporter(c *Controller) { err := prometheus.Register(GetCollector(c)) if err != nil { - c.Log.Error().Err(err).Msg("Expected error in testing") + c.Log.Debug().Err(err).Msg("ignoring error") } http.Handle(c.Config.Exporter.Metrics.Path, promhttp.Handler()) - c.Log.Info().Str("exporter addr", exporterAddr). - Str("exporter metrics path", c.Config.Exporter.Metrics.Path). - Msg("Exporter is listening on exporter addr & exposes metrics on exporter metrics path") + c.Log.Info().Str("addr", exporterAddr). + Str("path", c.Config.Exporter.Metrics.Path). 
+ Msg("exporter listening") serverAddr := fmt.Sprintf("%s://%s:%s", c.Config.Server.Protocol, c.Config.Server.Host, c.Config.Server.Port) - c.Log.Info().Str("serverAddr", serverAddr).Msg("Scraping metrics") - c.Log.Fatal().Err(server.ListenAndServe()).Msg("Exporter stopped") + c.Log.Info().Str("serverAddr", serverAddr).Msg("scraping metrics") + c.Log.Fatal().Err(server.ListenAndServe()).Msg("exporter stopped") } diff --git a/pkg/exporter/cli/cli.go b/pkg/exporter/cli/cli.go index 27a5471974..e944d39d10 100644 --- a/pkg/exporter/cli/cli.go +++ b/pkg/exporter/cli/cli.go @@ -60,18 +60,24 @@ func loadConfiguration(config *api.Config, configPath string) { viper.SetConfigFile(configPath) if err := viper.ReadInConfig(); err != nil { - log.Error().Err(err).Msg("Error while reading configuration") + log.Error().Err(err).Str("config", configPath).Msg("failed to read configuration") panic(err) } metaData := &mapstructure.Metadata{} if err := viper.Unmarshal(&config, metadataConfig(metaData)); err != nil { - log.Error().Err(err).Msg("Error while unmarshaling new config") + log.Error().Err(err).Str("config", configPath).Msg("failed to unmarshal config") panic(err) } - if len(metaData.Keys) == 0 || len(metaData.Unused) > 0 { - log.Error().Err(zerr.ErrBadConfig).Msg("Bad configuration, retry writing it") + if len(metaData.Keys) == 0 { + log.Error().Err(zerr.ErrBadConfig).Str("config", configPath).Msg("bad configuration") + panic(zerr.ErrBadConfig) + } + + if len(metaData.Unused) > 0 { + log.Error().Err(zerr.ErrBadConfig).Interface("unknown fields", metaData.Unused). 
+ Str("config", configPath).Msg("bad configuration") panic(zerr.ErrBadConfig) } } diff --git a/pkg/extensions/extension_metrics.go b/pkg/extensions/extension_metrics.go index 54e71982fa..4978437d0d 100644 --- a/pkg/extensions/extension_metrics.go +++ b/pkg/extensions/extension_metrics.go @@ -18,10 +18,10 @@ func EnableMetricsExtension(config *config.Config, log log.Logger, rootDir strin if config.Extensions.Metrics.Prometheus.Path == "" { config.Extensions.Metrics.Prometheus.Path = "/metrics" - log.Warn().Msg("Prometheus instrumentation Path not set, changing to '/metrics'.") + log.Warn().Msg("prometheus instrumentation path not set, changing to '/metrics'.") } } else { - log.Info().Msg("Metrics config not provided, skipping Metrics config update") + log.Info().Msg("metrics config not provided, skipping metrics config update") } } diff --git a/pkg/extensions/extension_scrub.go b/pkg/extensions/extension_scrub.go index bc61ad1fa1..6cb7b4787b 100644 --- a/pkg/extensions/extension_scrub.go +++ b/pkg/extensions/extension_scrub.go @@ -25,7 +25,7 @@ func EnableScrubExtension(config *config.Config, log log.Logger, storeController if config.Extensions.Scrub.Interval < minScrubInterval { config.Extensions.Scrub.Interval = minScrubInterval - log.Warn().Msg("Scrub interval set to too-short interval < 2h, changing scrub duration to 2 hours and continuing.") //nolint:lll // gofumpt conflicts with lll + log.Warn().Msg("scrub interval set to too-short interval < 2h, changing scrub duration to 2 hours and continuing.") //nolint:lll // gofumpt conflicts with lll } generator := &taskGenerator{ diff --git a/pkg/extensions/extension_search.go b/pkg/extensions/extension_search.go index ecfa04612e..59069710a8 100644 --- a/pkg/extensions/extension_search.go +++ b/pkg/extensions/extension_search.go @@ -59,7 +59,7 @@ func EnableSearchExtension(conf *config.Config, storeController storage.StoreCon func downloadTrivyDB(interval time.Duration, sch *scheduler.Scheduler, cveScanner CveScanner, 
log log.Logger) { generator := cveinfo.NewDBUpdateTaskGenerator(interval, cveScanner, log) - log.Info().Msg("Submitting CVE DB update scheduler") + log.Info().Msg("submitting CVE DB update scheduler") sch.SubmitGenerator(generator, interval, scheduler.HighPriority) } @@ -68,7 +68,7 @@ func startScanner(interval time.Duration, metaDB mTypes.MetaDB, sch *scheduler.S ) { generator := cveinfo.NewScanTaskGenerator(metaDB, cveScanner, log) - log.Info().Msg("Submitting CVE scan scheduler") + log.Info().Msg("submitting CVE scan scheduler") sch.SubmitGenerator(generator, interval, scheduler.MediumPriority) } diff --git a/pkg/extensions/extension_sync.go b/pkg/extensions/extension_sync.go index 852112dc0d..a7f141e4a1 100644 --- a/pkg/extensions/extension_sync.go +++ b/pkg/extensions/extension_sync.go @@ -64,7 +64,7 @@ func EnableSyncExtension(config *config.Config, metaDB mTypes.MetaDB, return onDemand, nil } - log.Info().Msg("Sync registries config not provided or disabled, skipping sync") + log.Info().Msg("sync config not provided or disabled, so not enabling sync") return nil, nil //nolint: nilnil } diff --git a/pkg/extensions/monitoring/minimal.go b/pkg/extensions/monitoring/minimal.go index 54efb5d67d..c54f695367 100644 --- a/pkg/extensions/monitoring/minimal.go +++ b/pkg/extensions/monitoring/minimal.go @@ -314,7 +314,7 @@ func (ms *metricServer) CounterInc(cv *CounterValue) { if err != nil { // The last thing we want is to panic/stop the server due to instrumentation // thus log a message (should be detected during development of new metrics) - ms.log.Error().Err(err).Msg("Instrumentation error") + ms.log.Error().Err(err).Msg("failed due to instrumentation error") return } @@ -334,7 +334,7 @@ func (ms *metricServer) GaugeSet(gv *GaugeValue) { err := sanityChecks(gv.Name, labels, ok, gv.LabelNames, gv.LabelValues) if err != nil { - ms.log.Error().Err(err).Msg("Instrumentation error") + ms.log.Error().Err(err).Msg("failed due to instrumentation error") return } @@ 
-353,7 +353,7 @@ func (ms *metricServer) SummaryObserve(sv *SummaryValue) { err := sanityChecks(sv.Name, labels, ok, sv.LabelNames, sv.LabelValues) if err != nil { - ms.log.Error().Err(err).Msg("Instrumentation error") + ms.log.Error().Err(err).Msg("failed due to instrumentation error") return } @@ -374,7 +374,7 @@ func (ms *metricServer) HistogramObserve(hv *HistogramValue) { err := sanityChecks(hv.Name, labels, ok, hv.LabelNames, hv.LabelValues) if err != nil { - ms.log.Error().Err(err).Msg("Instrumentation error") + ms.log.Error().Err(err).Msg("failed due to instrumentation error") return } diff --git a/pkg/extensions/search/cve/cve.go b/pkg/extensions/search/cve/cve.go index f4747cff7c..f14d31078f 100644 --- a/pkg/extensions/search/cve/cve.go +++ b/pkg/extensions/search/cve/cve.go @@ -199,10 +199,10 @@ func (cveinfo BaseCveInfo) GetImageListWithCVEFixed(repo, cveID string) ([]cvemo if len(vulnerableTags) != 0 { cveinfo.Log.Info().Str("repository", repo).Str("cve-id", cveID). - Interface("vulnerableTags", vulnerableTags).Msg("Vulnerable tags") + Interface("tags", vulnerableTags).Msg("vulnerable tags") fixedTags = GetFixedTags(allTags, vulnerableTags) cveinfo.Log.Info().Str("repository", repo).Str("cve-id", cveID). - Interface("fixedTags", fixedTags).Msg("Fixed tags") + Interface("tags", fixedTags).Msg("fixed tags") } else { cveinfo.Log.Info().Str("repository", repo).Str("cve-id", cveID). 
Msg("image does not contain any tag that have given cve") diff --git a/pkg/extensions/search/cve/scan.go b/pkg/extensions/search/cve/scan.go index cacf084ad7..66607bb063 100644 --- a/pkg/extensions/search/cve/scan.go +++ b/pkg/extensions/search/cve/scan.go @@ -15,10 +15,12 @@ import ( func NewScanTaskGenerator( metaDB mTypes.MetaDB, scanner Scanner, - log log.Logger, + logC log.Logger, ) scheduler.TaskGenerator { + sublogger := logC.With().Str("component", "search").Logger() + return &scanTaskGenerator{ - log: log, + log: log.Logger{Logger: sublogger}, metaDB: metaDB, scanner: scanner, lock: &sync.Mutex{}, @@ -127,13 +129,13 @@ func (gen *scanTaskGenerator) Next() (scheduler.Task, error) { if err != nil { // Do not crash the generator for potential repodb inconistencies // as there may be scannable images not yet scanned - gen.log.Warn().Err(err).Msg("Scheduled CVE scan: error while obtaining repo metadata") + gen.log.Warn().Err(err).Msg("failed to obtain repo metadata during scheduled CVE scan") } // no reposMeta are returned, all results are in already in cache // or manifests cannot be scanned if len(reposMeta) == 0 { - gen.log.Info().Msg("Scheduled CVE scan: finished for available images") + gen.log.Info().Msg("finished scanning available images during scheduled CVE scan") gen.done = true @@ -195,13 +197,13 @@ func (st *scanTask) DoWork(ctx context.Context) error { // We cache the results internally in the scanner // so we can discard the actual results for now if _, err := st.generator.scanner.ScanImage(image); err != nil { - st.generator.log.Error().Err(err).Str("image", image).Msg("Scheduled CVE scan errored for image") + st.generator.log.Error().Err(err).Str("image", image).Msg("scheduled CVE scan failed for image") st.generator.addError(st.digest, err) return err } - st.generator.log.Debug().Str("image", image).Msg("Scheduled CVE scan completed successfully for image") + st.generator.log.Debug().Str("image", image).Msg("scheduled CVE scan completed 
successfully for image") return nil } diff --git a/pkg/extensions/search/cve/trivy/scanner.go b/pkg/extensions/search/cve/trivy/scanner.go index 86e0a2f175..32e90ef1ab 100644 --- a/pkg/extensions/search/cve/trivy/scanner.go +++ b/pkg/extensions/search/cve/trivy/scanner.go @@ -249,7 +249,7 @@ func (scanner Scanner) isManifestScanable(digestStr string) (bool, error) { err = json.Unmarshal(manifestData.ManifestBlob, &manifestContent) if err != nil { - scanner.log.Error().Err(err).Msg("unable to unmashal manifest blob") + scanner.log.Error().Err(err).Msg("failed to unmashal manifest blob") return false, zerr.ErrScanNotSupported } @@ -352,7 +352,7 @@ func (scanner Scanner) ScanImage(image string) (map[string]cvemodel.CVE, error) } if err != nil { - scanner.log.Error().Err(err).Str("image", originalImageInput).Msg("unable to scan image") + scanner.log.Error().Err(err).Str("image", originalImageInput).Msg("failed to scan image") return map[string]cvemodel.CVE{}, err } @@ -507,18 +507,16 @@ func (scanner Scanner) UpdateDB() error { } func (scanner Scanner) updateDB(dbDir string) error { - scanner.log.Debug().Str("dbDir", dbDir).Msg("Download Trivy DB to destination dir") - ctx := context.Background() registryOpts := fanalTypes.RegistryOptions{Insecure: false} - scanner.log.Debug().Str("dbDir", dbDir).Msg("Started downloading Trivy DB to destination dir") + scanner.log.Debug().Str("dbDir", dbDir).Msg("started downloading trivy-db to destination dir") err := operation.DownloadDB(ctx, "dev", dbDir, scanner.dbRepository, false, false, registryOpts) if err != nil { scanner.log.Error().Err(err).Str("dbDir", dbDir). 
- Str("dbRepository", scanner.dbRepository).Msg("Error downloading Trivy DB to destination dir") + Str("dbRepository", scanner.dbRepository).Msg("failed to download trivy-db to destination dir") return err } @@ -528,13 +526,13 @@ func (scanner Scanner) updateDB(dbDir string) error { if err := javadb.Update(); err != nil { scanner.log.Error().Err(err).Str("dbDir", dbDir). - Str("javaDBRepository", scanner.javaDBRepository).Msg("Error downloading Trivy Java DB to destination dir") + Str("javaDBRepository", scanner.javaDBRepository).Msg("failed to download trivy-java-db to destination dir") return err } } - scanner.log.Debug().Str("dbDir", dbDir).Msg("Finished downloading Trivy DB to destination dir") + scanner.log.Debug().Str("dbDir", dbDir).Msg("finished downloading trivy-db to destination dir") return nil } diff --git a/pkg/extensions/sync/references/references.go b/pkg/extensions/sync/references/references.go index ce1acf29ab..971bf9d74f 100644 --- a/pkg/extensions/sync/references/references.go +++ b/pkg/extensions/sync/references/references.go @@ -79,7 +79,7 @@ func (refs References) syncAll(ctx context.Context, localRepo, upstreamRepo, for _, ref := range refs.referenceList { syncedRefsDigests, err = ref.SyncReferences(ctx, localRepo, upstreamRepo, subjectDigestStr) if err != nil { - refs.log.Error().Err(err). + refs.log.Warn().Err(err). Str("reference type", ref.Name()). Str("image", fmt.Sprintf("%s:%s", upstreamRepo, subjectDigestStr)). Msg("couldn't sync image referrer") @@ -108,7 +108,7 @@ func (refs References) SyncReference(ctx context.Context, localRepo, upstreamRep if ref.Name() == referenceType { syncedRefsDigests, err = ref.SyncReferences(ctx, localRepo, upstreamRepo, subjectDigestStr) if err != nil { - refs.log.Error().Err(err). + refs.log.Warn().Err(err). Str("reference type", ref.Name()). Str("image", fmt.Sprintf("%s:%s", upstreamRepo, subjectDigestStr)). 
Msg("couldn't sync image referrer") diff --git a/pkg/log/guidelines.md b/pkg/log/guidelines.md index a01c660751..3d62ee3e97 100644 --- a/pkg/log/guidelines.md +++ b/pkg/log/guidelines.md @@ -61,3 +61,5 @@ For example, lookup a cache (fast path) and it throws a not-found error, and we expect to handle it and perform a slow path lookup. Instead of logging the lookup failure at ERROR level, it may be more appropriate to log at DEBUG level and then handle the error. + +Also, instead of `Msg("error at something")` standardize on `Msg("failed at something")`. diff --git a/pkg/meta/boltdb/boltdb.go b/pkg/meta/boltdb/boltdb.go index ca14e456da..3cc5cec703 100644 --- a/pkg/meta/boltdb/boltdb.go +++ b/pkg/meta/boltdb/boltdb.go @@ -1118,7 +1118,7 @@ func (bdw *BoltDB) SearchRepos(ctx context.Context, searchText string, indexDataMap[indexDigest] = indexData default: - bdw.Log.Error().Str("mediaType", descriptor.MediaType).Msg("Unsupported media type") + bdw.Log.Error().Str("mediaType", descriptor.MediaType).Msg("unsupported media type") continue } @@ -1315,7 +1315,7 @@ func (bdw *BoltDB) FilterTags(ctx context.Context, filterFunc mTypes.FilterFunc, matchedTags[tag] = descriptor } default: - bdw.Log.Error().Str("mediaType", descriptor.MediaType).Msg("Unsupported media type") + bdw.Log.Error().Str("mediaType", descriptor.MediaType).Msg("unsupported media type") continue } diff --git a/pkg/meta/parse.go b/pkg/meta/parse.go index d9317f65cf..3d639255f2 100644 --- a/pkg/meta/parse.go +++ b/pkg/meta/parse.go @@ -20,13 +20,13 @@ import ( // ParseStorage will sync all repos found in the rootdirectory of the oci layout that zot was deployed on with the // ParseStorage database. 
func ParseStorage(metaDB mTypes.MetaDB, storeController storage.StoreController, log log.Logger) error { - log.Info().Msg("Started parsing storage and updating MetaDB") + log.Info().Str("component", "metadb").Msg("parsing storage and initializing") allRepos, err := getAllRepos(storeController) if err != nil { rootDir := storeController.DefaultStore.RootDir() - log.Error().Err(err).Str("rootDir", rootDir). - Msg("load-local-layout: failed to get all repo names present under rootDir") + log.Error().Err(err).Str("component", "metadb").Str("rootDir", rootDir). + Msg("failed to get all repo names present under rootDir") return err } @@ -34,13 +34,13 @@ func ParseStorage(metaDB mTypes.MetaDB, storeController storage.StoreController, for _, repo := range allRepos { err := ParseRepo(repo, metaDB, storeController, log) if err != nil { - log.Error().Err(err).Str("repository", repo).Msg("load-local-layout: failed to sync repo") + log.Error().Err(err).Str("component", "metadb").Str("repository", repo).Msg("failed to parse repo") return err } } - log.Info().Msg("Done parsing storage and updating MetaDB") + log.Info().Str("component", "metadb").Msg("successfully initialized") return nil } @@ -56,7 +56,7 @@ func ParseRepo(repo string, metaDB mTypes.MetaDB, storeController storage.StoreC indexBlob, err := imageStore.GetIndexContent(repo) if err != nil { - log.Error().Err(err).Str("repository", repo).Msg("load-repo: failed to read index.json for repo") + log.Error().Err(err).Str("repository", repo).Msg("failed to read index.json for repo") return err } @@ -65,14 +65,14 @@ func ParseRepo(repo string, metaDB mTypes.MetaDB, storeController storage.StoreC err = json.Unmarshal(indexBlob, &indexContent) if err != nil { - log.Error().Err(err).Str("repository", repo).Msg("load-repo: failed to unmarshal index.json for repo") + log.Error().Err(err).Str("repository", repo).Msg("failed to unmarshal index.json for repo") return err } err = resetRepoMeta(repo, metaDB, log) if err != nil && 
!errors.Is(err, zerr.ErrRepoMetaNotFound) { - log.Error().Err(err).Str("repository", repo).Msg("load-repo: failed to reset tag field in RepoMetadata for repo") + log.Error().Err(err).Str("repository", repo).Msg("failed to reset tag field in RepoMetadata for repo") return err } @@ -82,7 +82,7 @@ func ParseRepo(repo string, metaDB mTypes.MetaDB, storeController storage.StoreC descriptorBlob, err := getCachedBlob(repo, descriptor, metaDB, imageStore, log) if err != nil { - log.Error().Err(err).Msg("load-repo: error checking manifestMeta in MetaDB") + log.Error().Err(err).Msg("error checking manifestMeta in MetaDB") return err } @@ -91,7 +91,7 @@ func ParseRepo(repo string, metaDB mTypes.MetaDB, storeController storage.StoreC descriptorBlob, tag) if err != nil { log.Error().Err(err).Str("repository", repo).Str("tag", tag). - Msg("load-repo: failed checking if image is signature for specified image") + Msg("failed checking if image is signature for specified image") return err } @@ -112,7 +112,7 @@ func ParseRepo(repo string, metaDB mTypes.MetaDB, storeController storage.StoreC if err != nil { log.Error().Err(err).Str("repository", repo).Str("tag", tag). Str("manifestDigest", signedManifestDigest.String()). - Msg("load-repo: failed set signature meta for signed image") + Msg("failed set signature meta for signed image") return err } @@ -120,7 +120,7 @@ func ParseRepo(repo string, metaDB mTypes.MetaDB, storeController storage.StoreC err = metaDB.UpdateSignaturesValidity(repo, signedManifestDigest) if err != nil { log.Error().Err(err).Str("repository", repo).Str("reference", tag).Str("digest", signedManifestDigest.String()).Msg( - "load-repo: failed verify signatures validity for signed image") + "failed verify signatures validity for signed image") return err } @@ -138,7 +138,7 @@ func ParseRepo(repo string, metaDB mTypes.MetaDB, storeController storage.StoreC imageStore, metaDB, log) if err != nil { log.Error().Err(err).Str("repository", repo).Str("tag", tag). 
- Msg("load-repo: failed to set metadata for image") + Msg("failed to set metadata for image") return err } @@ -152,13 +152,13 @@ func ParseRepo(repo string, metaDB mTypes.MetaDB, storeController storage.StoreC func resetRepoMeta(repo string, metaDB mTypes.MetaDB, log log.Logger) error { repoMeta, err := metaDB.GetRepoMeta(repo) if err != nil && !errors.Is(err, zerr.ErrRepoMetaNotFound) { - log.Error().Err(err).Str("repository", repo).Msg("load-repo: failed to get RepoMeta for repo") + log.Error().Err(err).Str("repository", repo).Msg("failed to get RepoMeta for repo") return err } if errors.Is(err, zerr.ErrRepoMetaNotFound) { - log.Info().Str("repository", repo).Msg("load-repo: RepoMeta not found for repo, new RepoMeta will be created") + log.Info().Str("repository", repo).Msg("repoMeta not found for repo, new RepoMeta will be created") return nil } @@ -204,7 +204,7 @@ func getCachedBlob(repo string, descriptor ispec.Descriptor, metaDB mTypes.MetaD descriptorBlob, _, _, err = imageStore.GetImageManifest(repo, digest.String()) if err != nil { log.Error().Err(err).Str("repository", repo).Str("digest", digest.String()). 
- Msg("load-repo: failed to get blob for image") + Msg("failed to get blob for image") return nil, err } @@ -251,7 +251,7 @@ func getCosignSignatureLayersInfo( var manifestContent ispec.Manifest if err := json.Unmarshal(manifestBlob, &manifestContent); err != nil { log.Error().Err(err).Str("repository", repo).Str("reference", tag).Str("digest", manifestDigest).Msg( - "load-repo: unable to marshal blob index") + "failed to marshal blob index") return layers, err } @@ -265,7 +265,7 @@ func getCosignSignatureLayersInfo( layerContent, err := imageStore.GetBlobContent(repo, layer.Digest) if err != nil { log.Error().Err(err).Str("repository", repo).Str("reference", tag).Str("layerDigest", layer.Digest.String()).Msg( - "load-repo: unable to get cosign signature layer content") + "failed to get cosign signature layer content") return layers, err } @@ -273,7 +273,7 @@ func getCosignSignatureLayersInfo( layerSigKey, ok := layer.Annotations[zcommon.CosignSigKey] if !ok { log.Error().Err(err).Str("repository", repo).Str("reference", tag).Str("layerDigest", layer.Digest.String()).Msg( - "load-repo: unable to get specific annotation of cosign signature") + "failed to get specific annotation of cosign signature") } layers = append(layers, mTypes.LayerInfo{ @@ -294,14 +294,14 @@ func getNotationSignatureLayersInfo( var manifestContent ispec.Manifest if err := json.Unmarshal(manifestBlob, &manifestContent); err != nil { log.Error().Err(err).Str("repository", repo).Str("reference", manifestDigest).Msg( - "load-repo: unable to marshal blob index") + "failed to marshal blob index") return layers, err } if len(manifestContent.Layers) != 1 { log.Error().Err(zerr.ErrBadManifest).Str("repository", repo).Str("reference", manifestDigest). 
- Msg("load-repo: notation signature manifest requires exactly one layer but it does not") + Msg("notation signature manifest requires exactly one layer but it does not") return layers, zerr.ErrBadManifest } @@ -316,7 +316,7 @@ func getNotationSignatureLayersInfo( layerContent, err := imageStore.GetBlobContent(repo, layer) if err != nil { log.Error().Err(err).Str("repository", repo).Str("reference", manifestDigest).Str("layerDigest", layer.String()).Msg( - "load-repo: unable to get notation signature blob content") + "failed to get notation signature blob content") return layers, err } diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go index cc771d065c..c53d1017f8 100644 --- a/pkg/scheduler/scheduler.go +++ b/pkg/scheduler/scheduler.go @@ -101,13 +101,13 @@ func (scheduler *Scheduler) poolWorker(ctx context.Context, numWorkers int, task for i := 0; i < numWorkers; i++ { go func(workerID int) { for task := range tasks { - scheduler.log.Debug().Int("worker", workerID).Msg("scheduler: starting task") + scheduler.log.Debug().Int("worker", workerID).Msg("starting task") if err := task.DoWork(ctx); err != nil { - scheduler.log.Error().Int("worker", workerID).Err(err).Msg("scheduler: error while executing task") + scheduler.log.Error().Int("worker", workerID).Err(err).Msg("error while executing task") } - scheduler.log.Debug().Int("worker", workerID).Msg("scheduler: finished task") + scheduler.log.Debug().Int("worker", workerID).Msg("finished task") } }(i + 1) } @@ -129,7 +129,7 @@ func (scheduler *Scheduler) RunScheduler(ctx context.Context) { close(tasksWorker) close(scheduler.stopCh) - scheduler.log.Debug().Msg("scheduler: received stop signal, exiting...") + scheduler.log.Debug().Msg("received stop signal, exiting...") return default: @@ -138,7 +138,7 @@ func (scheduler *Scheduler) RunScheduler(ctx context.Context) { task := scheduler.getTask() if task != nil { // push tasks into worker pool - scheduler.log.Debug().Msg("scheduler: pushing task into 
worker pool") + scheduler.log.Debug().Msg("pushing task into worker pool") tasksWorker <- task } i++ @@ -162,7 +162,7 @@ func (scheduler *Scheduler) pushReadyGenerators() { scheduler.waitingGenerators = append(scheduler.waitingGenerators[:i], scheduler.waitingGenerators[i+1:]...) modified = true - scheduler.log.Debug().Msg("scheduler: waiting generator is ready, pushing to ready generators") + scheduler.log.Debug().Msg("waiting generator is ready, pushing to ready generators") break } @@ -261,7 +261,7 @@ func (scheduler *Scheduler) SubmitTask(task Task, priority Priority) { case <-scheduler.stopCh: return case tasksQ <- task: - scheduler.log.Info().Msg("scheduler: adding a new task") + scheduler.log.Info().Msg("adding a new task") } } @@ -308,7 +308,7 @@ func (gen *generator) generate(sch *Scheduler) { if gen.remainingTask == nil { nextTask, err := gen.taskGenerator.Next() if err != nil { - sch.log.Error().Err(err).Msg("scheduler: error while executing generator") + sch.log.Error().Err(err).Msg("error while executing generator") return } diff --git a/pkg/storage/common/common.go b/pkg/storage/common/common.go index 533c078f50..fe8364e96f 100644 --- a/pkg/storage/common/common.go +++ b/pkg/storage/common/common.go @@ -82,7 +82,7 @@ func ValidateManifest(imgStore storageTypes.ImageStore, repo, reference, mediaTy // validate manifest if err := ValidateManifestSchema(body); err != nil { - log.Error().Err(err).Msg("OCIv1 image manifest schema validation failed") + log.Error().Err(err).Msg("failed to validate OCIv1 image manifest schema") return "", zerr.NewError(zerr.ErrBadManifest).AddDetail("jsonSchemaValidation", err.Error()) } @@ -131,7 +131,7 @@ func ValidateManifest(imgStore storageTypes.ImageStore, repo, reference, mediaTy case ispec.MediaTypeImageIndex: // validate manifest if err := ValidateImageIndexSchema(body); err != nil { - log.Error().Err(err).Msg("OCIv1 image index manifest schema validation failed") + log.Error().Err(err).Msg("failed to validate OCIv1 
image index manifest schema") return "", zerr.NewError(zerr.ErrBadManifest).AddDetail("jsonSchemaValidation", err.Error()) } diff --git a/pkg/storage/imagestore/imagestore.go b/pkg/storage/imagestore/imagestore.go index a65762c10b..e5bddd1c66 100644 --- a/pkg/storage/imagestore/imagestore.go +++ b/pkg/storage/imagestore/imagestore.go @@ -67,7 +67,7 @@ func NewImageStore(rootDir string, cacheDir string, dedupe, commit bool, log zlo metrics monitoring.MetricServer, linter common.Lint, storeDriver storageTypes.Driver, cacheDriver cache.Cache, ) storageTypes.ImageStore { if err := storeDriver.EnsureDir(rootDir); err != nil { - log.Error().Err(err).Str("rootDir", rootDir).Msg("unable to create root dir") + log.Error().Err(err).Str("rootDir", rootDir).Msg("failed to create root dir") return nil } @@ -158,13 +158,13 @@ func (is *ImageStore) initRepo(name string) error { buf, err := json.Marshal(il) if err != nil { - is.log.Error().Err(err).Msg("unable to marshal JSON") + is.log.Error().Err(err).Msg("failed to marshal JSON") return err } if _, err := is.storeDriver.WriteFile(ilPath, buf); err != nil { - is.log.Error().Err(err).Str("file", ilPath).Msg("unable to write file") + is.log.Error().Err(err).Str("file", ilPath).Msg("failed to write file") return err } @@ -178,13 +178,13 @@ func (is *ImageStore) initRepo(name string) error { buf, err := json.Marshal(index) if err != nil { - is.log.Error().Err(err).Msg("unable to marshal JSON") + is.log.Error().Err(err).Msg("failed to marshal JSON") return err } if _, err := is.storeDriver.WriteFile(indexPath, buf); err != nil { - is.log.Error().Err(err).Str("file", ilPath).Msg("unable to write file") + is.log.Error().Err(err).Str("file", ilPath).Msg("failed to write file") return err } @@ -220,7 +220,7 @@ func (is *ImageStore) ValidateRepo(name string) (bool, error) { files, err := is.storeDriver.List(dir) if err != nil { - is.log.Error().Err(err).Str("dir", dir).Msg("unable to read directory") + is.log.Error().Err(err).Str("dir", 
dir).Msg("failed to read directory") return false, zerr.ErrRepoNotFound } @@ -565,7 +565,7 @@ func (is *ImageStore) PutImageManifest(repo, reference, mediaType string, //noli manifestPath := path.Join(dir, mDigest.Encoded()) if _, err = is.storeDriver.WriteFile(manifestPath, body); err != nil { - is.log.Error().Err(err).Str("file", manifestPath).Msg("unable to write") + is.log.Error().Err(err).Str("file", manifestPath).Msg("failed to write") return "", "", err } @@ -658,7 +658,7 @@ func (is *ImageStore) deleteImageManifest(repo, reference string, detectCollisio } if _, err := is.storeDriver.WriteFile(file, buf); err != nil { - is.log.Debug().Str("reference", reference).Str("repo", repo).Msg("error while updating index.json") + is.log.Debug().Str("reference", reference).Str("repository", repo).Msg("error while updating index.json") return err } @@ -905,14 +905,14 @@ func (is *ImageStore) FinishBlobUpload(repo, uuid string, body io.Reader, dstDig err = is.DedupeBlob(src, dstDigest, dst) if err := inject.Error(err); err != nil { is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()). - Str("dst", dst).Msg("unable to dedupe blob") + Str("dst", dst).Msg("failed to dedupe blob") return err } } else { if err := is.storeDriver.Move(src, dst); err != nil { is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()). - Str("dst", dst).Msg("unable to finish blob") + Str("dst", dst).Msg("failed to finish blob") return err } @@ -983,14 +983,14 @@ func (is *ImageStore) FullBlobUpload(repo string, body io.Reader, dstDigest godi if is.dedupe && fmt.Sprintf("%v", is.cache) != fmt.Sprintf("%v", nil) { if err := is.DedupeBlob(src, dstDigest, dst); err != nil { is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()). 
- Str("dst", dst).Msg("unable to dedupe blob") + Str("dst", dst).Msg("failed to dedupe blob") return "", -1, err } } else { if err := is.storeDriver.Move(src, dst); err != nil { is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()). - Str("dst", dst).Msg("unable to finish blob") + Str("dst", dst).Msg("failed to finish blob") return "", -1, err } @@ -1005,7 +1005,7 @@ retry: dstRecord, err := is.cache.GetBlob(dstDigest) if err := inject.Error(err); err != nil && !errors.Is(err, zerr.ErrCacheMiss) { - is.log.Error().Err(err).Str("blobPath", dst).Msg("dedupe: unable to lookup blob record") + is.log.Error().Err(err).Str("blobPath", dst).Msg("dedupe: failed to lookup blob record") return err } @@ -1013,14 +1013,14 @@ retry: if dstRecord == "" { // cache record doesn't exist, so first disk and cache entry for this digest if err := is.cache.PutBlob(dstDigest, dst); err != nil { - is.log.Error().Err(err).Str("blobPath", dst).Msg("dedupe: unable to insert blob record") + is.log.Error().Err(err).Str("blobPath", dst).Msg("dedupe: failed to insert blob record") return err } // move the blob from uploads to final dest if err := is.storeDriver.Move(src, dst); err != nil { - is.log.Error().Err(err).Str("src", src).Str("dst", dst).Msg("dedupe: unable to rename blob") + is.log.Error().Err(err).Str("src", src).Str("dst", dst).Msg("dedupe: failed to rename blob") return err } @@ -1036,12 +1036,12 @@ retry: _, err := is.storeDriver.Stat(dstRecord) if err != nil { - is.log.Error().Err(err).Str("blobPath", dstRecord).Msg("dedupe: unable to stat") + is.log.Error().Err(err).Str("blobPath", dstRecord).Msg("dedupe: failed to stat") // the actual blob on disk may have been removed by GC, so sync the cache err := is.cache.DeleteBlob(dstDigest, dstRecord) if err = inject.Error(err); err != nil { //nolint:lll - is.log.Error().Err(err).Str("dstDigest", dstDigest.String()).Str("dst", dst).Msg("dedupe: unable to delete blob record") + is.log.Error().Err(err).Str("dstDigest", 
dstDigest.String()).Str("dst", dst).Msg("dedupe: failed to delete blob record") return err } @@ -1052,13 +1052,13 @@ retry: // prevent overwrite original blob if !is.storeDriver.SameFile(dst, dstRecord) { if err := is.storeDriver.Link(dstRecord, dst); err != nil { - is.log.Error().Err(err).Str("blobPath", dstRecord).Msg("dedupe: unable to link blobs") + is.log.Error().Err(err).Str("blobPath", dstRecord).Msg("dedupe: failed to link blobs") return err } if err := is.cache.PutBlob(dstDigest, dst); err != nil { - is.log.Error().Err(err).Str("blobPath", dst).Msg("dedupe: unable to insert blob record") + is.log.Error().Err(err).Str("blobPath", dst).Msg("dedupe: failed to insert blob record") return err } @@ -1066,7 +1066,7 @@ retry: // remove temp blobupload if err := is.storeDriver.Delete(src); err != nil { - is.log.Error().Err(err).Str("src", src).Msg("dedupe: unable to remove blob") + is.log.Error().Err(err).Str("src", src).Msg("dedupe: failed to remove blob") return err } @@ -1139,7 +1139,7 @@ func (is *ImageStore) CheckBlob(repo string, digest godigest.Digest) (bool, int6 // Check blobs in cache dstRecord, err := is.checkCacheBlob(digest) if err != nil { - is.log.Error().Err(err).Str("digest", digest.String()).Msg("cache: not found") + is.log.Warn().Err(err).Str("digest", digest.String()).Msg("not found in cache") return false, -1, zerr.ErrBlobNotFound } @@ -1151,7 +1151,7 @@ func (is *ImageStore) CheckBlob(repo string, digest godigest.Digest) (bool, int6 // put deduped blob in cache if err := is.cache.PutBlob(digest, blobPath); err != nil { - is.log.Error().Err(err).Str("blobPath", blobPath).Msg("dedupe: unable to insert blob record") + is.log.Error().Err(err).Str("blobPath", blobPath).Msg("dedupe: failed to insert blob record") return false, -1, err } @@ -1185,7 +1185,7 @@ func (is *ImageStore) StatBlob(repo string, digest godigest.Digest) (bool, int64 // Check blobs in cache dstRecord, err := is.checkCacheBlob(digest) if err != nil { - 
is.log.Error().Err(err).Str("digest", digest.String()).Msg("cache: not found") + is.log.Warn().Err(err).Str("digest", digest.String()).Msg("not found in cache") return false, -1, time.Time{}, zerr.ErrBlobNotFound } @@ -1224,7 +1224,7 @@ func (is *ImageStore) checkCacheBlob(digest godigest.Digest) (string, error) { // the actual blob on disk may have been removed by GC, so sync the cache if err := is.cache.DeleteBlob(digest, dstRecord); err != nil { is.log.Error().Err(err).Str("digest", digest.String()).Str("blobPath", dstRecord). - Msg("unable to remove blob path from cache") + Msg("failed to remove blob path from cache") return "", err } @@ -1239,7 +1239,7 @@ func (is *ImageStore) checkCacheBlob(digest godigest.Digest) (string, error) { func (is *ImageStore) copyBlob(repo string, blobPath, dstRecord string) (int64, error) { if err := is.initRepo(repo); err != nil { - is.log.Error().Err(err).Str("repository", repo).Msg("unable to initialize an empty repo") + is.log.Error().Err(err).Str("repository", repo).Msg("failed to initialize an empty repo") return -1, err } @@ -1247,7 +1247,7 @@ func (is *ImageStore) copyBlob(repo string, blobPath, dstRecord string) (int64, _ = is.storeDriver.EnsureDir(filepath.Dir(blobPath)) if err := is.storeDriver.Link(dstRecord, blobPath); err != nil { - is.log.Error().Err(err).Str("blobPath", blobPath).Str("link", dstRecord).Msg("dedupe: unable to hard link") + is.log.Error().Err(err).Str("blobPath", blobPath).Str("link", dstRecord).Msg("dedupe: failed to hard link") return -1, zerr.ErrBlobNotFound } @@ -1288,7 +1288,7 @@ func (is *ImageStore) GetBlobPartial(repo string, digest godigest.Digest, mediaT // Check blobs in cache blobPath, err = is.checkCacheBlob(digest) if err != nil { - is.log.Error().Err(err).Str("digest", digest.String()).Msg("cache: not found") + is.log.Warn().Err(err).Str("digest", digest.String()).Msg("not found in cache") return nil, -1, -1, zerr.ErrBlobNotFound } @@ -1358,7 +1358,7 @@ func (is *ImageStore) 
GetBlob(repo string, digest godigest.Digest, mediaType str // Check blobs in cache dstRecord, err := is.checkCacheBlob(digest) if err != nil { - is.log.Error().Err(err).Str("digest", digest.String()).Msg("cache: not found") + is.log.Warn().Err(err).Str("digest", digest.String()).Msg("not found in cache") return nil, -1, zerr.ErrBlobNotFound } @@ -1411,7 +1411,7 @@ func (is *ImageStore) GetBlobContent(repo string, digest godigest.Digest) ([]byt // Check blobs in cache dstRecord, err := is.checkCacheBlob(digest) if err != nil { - is.log.Error().Err(err).Str("digest", digest.String()).Msg("cache: not found") + is.log.Warn().Err(err).Str("digest", digest.String()).Msg("not found in cache") return nil, zerr.ErrBlobNotFound } @@ -1476,13 +1476,13 @@ func (is *ImageStore) PutIndexContent(repo string, index ispec.Index) error { buf, err := json.Marshal(index) if err != nil { - is.log.Error().Err(err).Str("file", indexPath).Msg("unable to marshal JSON") + is.log.Error().Err(err).Str("file", indexPath).Msg("failed to marshal JSON") return err } if _, err = is.storeDriver.WriteFile(indexPath, buf); err != nil { - is.log.Error().Err(err).Str("file", indexPath).Msg("unable to write") + is.log.Error().Err(err).Str("file", indexPath).Msg("failed to write") return err } @@ -1521,14 +1521,14 @@ func (is *ImageStore) CleanupRepo(repo string, blobs []godigest.Digest, removeRe continue } - is.log.Error().Err(err).Str("repository", repo).Str("digest", digest.String()).Msg("unable to delete manifest") + is.log.Error().Err(err).Str("repository", repo).Str("digest", digest.String()).Msg("failed to delete manifest") return count, err } count++ } else { - is.log.Error().Err(err).Str("repository", repo).Str("digest", digest.String()).Msg("unable to delete blob") + is.log.Error().Err(err).Str("repository", repo).Str("digest", digest.String()).Msg("failed to delete blob") return count, err } @@ -1539,13 +1539,13 @@ func (is *ImageStore) CleanupRepo(repo string, blobs []godigest.Digest, 
removeRe blobUploads, err := is.storeDriver.List(path.Join(is.RootDir(), repo, storageConstants.BlobUploadDir)) if err != nil { - is.log.Debug().Str("repository", repo).Msg("unable to list .uploads/ dir") + is.log.Debug().Str("repository", repo).Msg("failed to list .uploads/ dir") } // if removeRepo flag is true and we cleanup all blobs and there are no blobs currently being uploaded. if removeRepo && count == len(blobs) && count > 0 && len(blobUploads) == 0 { if err := is.storeDriver.Delete(path.Join(is.rootDir, repo)); err != nil { - is.log.Error().Err(err).Str("repository", repo).Msg("unable to remove repo") + is.log.Error().Err(err).Str("repository", repo).Msg("failed to remove repo") return count, err } @@ -1572,7 +1572,7 @@ func (is *ImageStore) deleteBlob(repo string, digest godigest.Digest) error { if fmt.Sprintf("%v", is.cache) != fmt.Sprintf("%v", nil) { dstRecord, err := is.cache.GetBlob(digest) if err != nil && !errors.Is(err, zerr.ErrCacheMiss) { - is.log.Error().Err(err).Str("blobPath", dstRecord).Msg("dedupe: unable to lookup blob record") + is.log.Error().Err(err).Str("blobPath", dstRecord).Msg("dedupe: failed to lookup blob record") return err } @@ -1581,7 +1581,7 @@ func (is *ImageStore) deleteBlob(repo string, digest godigest.Digest) error { if ok := is.cache.HasBlob(digest, blobPath); ok { if err := is.cache.DeleteBlob(digest, blobPath); err != nil { is.log.Error().Err(err).Str("digest", digest.String()).Str("blobPath", blobPath). 
- Msg("unable to remove blob path from cache") + Msg("failed to remove blob path from cache") return err } @@ -1592,7 +1592,7 @@ func (is *ImageStore) deleteBlob(repo string, digest godigest.Digest) error { // get next candidate dstRecord, err := is.cache.GetBlob(digest) if err != nil && !errors.Is(err, zerr.ErrCacheMiss) { - is.log.Error().Err(err).Str("blobPath", dstRecord).Msg("dedupe: unable to lookup blob record") + is.log.Error().Err(err).Str("blobPath", dstRecord).Msg("dedupe: failed to lookup blob record") return err } @@ -1610,7 +1610,7 @@ func (is *ImageStore) deleteBlob(repo string, digest godigest.Digest) error { if binfo.Size() == 0 { if err := is.storeDriver.Move(blobPath, dstRecord); err != nil { - is.log.Error().Err(err).Str("blobPath", blobPath).Msg("unable to remove blob path") + is.log.Error().Err(err).Str("blobPath", blobPath).Msg("failed to remove blob path") return err } @@ -1622,7 +1622,7 @@ func (is *ImageStore) deleteBlob(repo string, digest godigest.Digest) error { } if err := is.storeDriver.Delete(blobPath); err != nil { - is.log.Error().Err(err).Str("blobPath", blobPath).Msg("unable to remove blob path") + is.log.Error().Err(err).Str("blobPath", blobPath).Msg("failed to remove blob path") return err } @@ -1732,7 +1732,7 @@ func (is *ImageStore) getOriginalBlob(digest godigest.Digest, duplicateBlobs []s originalBlob, err = is.checkCacheBlob(digest) if err != nil && !errors.Is(err, zerr.ErrBlobNotFound) && !errors.Is(err, zerr.ErrCacheMiss) { - is.log.Error().Err(err).Msg("rebuild dedupe: unable to find blob in cache") + is.log.Error().Err(err).Msg("rebuild dedupe: failed to find blob in cache") return originalBlob, err } @@ -1781,7 +1781,7 @@ func (is *ImageStore) dedupeBlobs(digest godigest.Digest, duplicateBlobs []strin if originalBlob == "" { originalBlob, err = is.getOriginalBlob(digest, duplicateBlobs) if err != nil { - is.log.Error().Err(err).Msg("rebuild dedupe: unable to find original blob") + is.log.Error().Err(err).Msg("rebuild 
dedupe: failed to find original blob") return zerr.ErrDedupeRebuild } @@ -1804,7 +1804,7 @@ func (is *ImageStore) dedupeBlobs(digest godigest.Digest, duplicateBlobs []strin // if we have an original blob cached then we can safely dedupe the rest of them if originalBlob != "" { if err := is.storeDriver.Link(originalBlob, blobPath); err != nil { - is.log.Error().Err(err).Str("path", blobPath).Msg("rebuild dedupe: unable to dedupe blob") + is.log.Error().Err(err).Str("path", blobPath).Msg("rebuild dedupe: failed to dedupe blob") return err } @@ -1833,7 +1833,7 @@ func (is *ImageStore) restoreDedupedBlobs(digest godigest.Digest, duplicateBlobs // first we need to find the original blob, either in cache or by checking each blob size originalBlob, err := is.getOriginalBlob(digest, duplicateBlobs) if err != nil { - is.log.Error().Err(err).Msg("rebuild dedupe: unable to find original blob") + is.log.Error().Err(err).Msg("rebuild dedupe: failed to find original blob") return zerr.ErrDedupeRebuild } diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go index 56da20b74e..9e16178804 100644 --- a/pkg/storage/storage.go +++ b/pkg/storage/storage.go @@ -64,7 +64,7 @@ func New(config *config.Config, linter common.Lint, metrics monitoring.MetricSer // Init a Storager from connection string. store, err := factory.Create(storeName, config.Storage.StorageDriver) if err != nil { - log.Error().Err(err).Str("rootDir", config.Storage.RootDirectory).Msg("unable to create s3 service") + log.Error().Err(err).Str("rootDir", config.Storage.RootDirectory).Msg("failed to create s3 service") return storeController, err } @@ -168,7 +168,7 @@ func getSubStore(cfg *config.Config, subPaths map[string]config.StorageConfig, // Init a Storager from connection string. 
store, err := factory.Create(storeName, storageConfig.StorageDriver) if err != nil { - log.Error().Err(err).Str("rootDir", storageConfig.RootDirectory).Msg("Unable to create s3 service") + log.Error().Err(err).Str("rootDir", storageConfig.RootDirectory).Msg("failed to create s3 service") return nil, err }