From cbb3ae28ab8e616fcdfdd03e304ee3799e34fd02 Mon Sep 17 00:00:00 2001
From: Christoph Hartmann
Date: Tue, 30 Jan 2024 18:02:24 +0100
Subject: [PATCH] =?UTF-8?q?=F0=9F=90=9B=20report=20asset=20errors=20(#3152?=
 =?UTF-8?q?)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 apps/cnquery/cmd/scan.go       |  4 ++++
 explorer/scan/local_scanner.go | 42 ++++++++++++++++++++++++++++++++++--------
 2 files changed, 38 insertions(+), 8 deletions(-)

diff --git a/apps/cnquery/cmd/scan.go b/apps/cnquery/cmd/scan.go
index 2d513b4d96..b1a272578c 100644
--- a/apps/cnquery/cmd/scan.go
+++ b/apps/cnquery/cmd/scan.go
@@ -127,6 +127,10 @@ var scanCmdRun = func(cmd *cobra.Command, runtime *providers.Runtime, cliRes *pl
 	}
 
 	printReports(report, conf, cmd)
+
+	if report != nil && len(report.Errors) > 0 {
+		os.Exit(1)
+	}
 }
 
 // helper method to retrieve the list of query packs for autocomplete
diff --git a/explorer/scan/local_scanner.go b/explorer/scan/local_scanner.go
index 14f11c7cd3..ab9ae90fd0 100644
--- a/explorer/scan/local_scanner.go
+++ b/explorer/scan/local_scanner.go
@@ -43,6 +43,11 @@ type assetWithRuntime struct {
 	runtime *providers.Runtime
 }
 
+type assetWithError struct {
+	asset *inventory.Asset
+	err   error
+}
+
 type LocalScanner struct {
 	fetcher  *fetcher
 	upstream *upstream.UpstreamConfig
@@ -194,6 +199,7 @@ func (s *LocalScanner) distributeJob(job *Job, ctx context.Context, upstream *up
 	var assets []*assetWithRuntime
 	// note: asset candidate runtimes are the runtime that discovered them
 	var assetCandidates []*assetWithRuntime
+	var assetErrors []*assetWithError
 
 	// we connect and perform discovery for each asset in the job inventory
 	for i := range assetList {
@@ -206,6 +212,10 @@ func (s *LocalScanner) distributeJob(job *Job, ctx context.Context, upstream *up
 		runtime, err := providers.Coordinator.RuntimeFor(asset, providers.DefaultRuntime())
 		if err != nil {
 			log.Error().Err(err).Str("asset", asset.Name).Msg("unable to create runtime for asset")
+			assetErrors = append(assetErrors, &assetWithError{
+				asset: resolvedAsset,
+				err:   err,
+			})
 			continue
 		}
 		runtime.SetRecording(s.recording)
@@ -216,6 +226,10 @@ func (s *LocalScanner) distributeJob(job *Job, ctx context.Context, upstream *up
 			Upstream: upstream,
 		}); err != nil {
 			log.Error().Err(err).Msg("unable to connect to asset")
+			assetErrors = append(assetErrors, &assetWithError{
+				asset: resolvedAsset,
+				err:   err,
+			})
 			continue
 		}
 		asset = runtime.Provider.Connection.Asset // to ensure we get all the information the connect call gave us
@@ -227,7 +241,11 @@ func (s *LocalScanner) distributeJob(job *Job, ctx context.Context, upstream *up
 		}
 		processedAssets, err := providers.ProcessAssetCandidates(runtime, runtime.Provider.Connection, upstream, "")
 		if err != nil {
-			return nil, false, err
+			assetErrors = append(assetErrors, &assetWithError{
+				asset: resolvedAsset,
+				err:   err,
+			})
+			continue
 		}
 		for i := range processedAssets {
 			assetCandidates = append(assetCandidates, &assetWithRuntime{
@@ -300,10 +318,6 @@ func (s *LocalScanner) distributeJob(job *Job, ctx context.Context, upstream *up
 		})
 	}
 
-	if len(assets) == 0 {
-		return nil, false, nil
-	}
-
 	// if there is exactly one asset, assure that the --asset-name is used
 	// TODO: make it so that the --asset-name is set for the root asset only even if multiple assets are there
 	// This is a temporary fix that only works if there is only one asset
@@ -317,6 +331,20 @@ func (s *LocalScanner) distributeJob(job *Job, ctx context.Context, upstream *up
 		asset.asset.KindString = asset.asset.GetPlatform().Kind
 		justAssets = append(justAssets, asset.asset)
 	}
+	for _, asset := range assetErrors {
+		justAssets = append(justAssets, asset.asset)
+	}
+
+	// plan scan jobs
+	reporter := NewAggregateReporter(justAssets)
+	// if we had asset errors we want to place them into the reporter
+	for i := range assetErrors {
+		reporter.AddScanError(assetErrors[i].asset, assetErrors[i].err)
+	}
+
+	if len(assets) == 0 {
+		return reporter.Reports(), false, nil
+	}
 
 	// sync assets
 	if upstream != nil && upstream.ApiEndpoint != "" && !upstream.Incognito {
@@ -368,8 +396,6 @@ func (s *LocalScanner) distributeJob(job *Job, ctx context.Context, upstream *up
 		}
 	}
 
-	// plan scan jobs
-	reporter := NewAggregateReporter(justAssets)
 	// if a bundle was provided check that it matches the filter, bundles can also be downloaded
 	// later therefore we do not want to stop execution here
 	if job.Bundle != nil && job.Bundle.FilterQueryPacks(job.QueryPackFilters) {
@@ -382,7 +408,7 @@ func (s *LocalScanner) distributeJob(job *Job, ctx context.Context, upstream *up
 			// this shouldn't happen, but might
 			// it normally indicates a bug in the provider
 			if presentAsset, present := progressBarElements[assets[i].asset.PlatformIds[0]]; present {
-				return nil, false, fmt.Errorf("asset %s and %s have the same platform id %s", presentAsset, assets[i].asset.Name, assets[i].asset.PlatformIds[0])
+				return reporter.Reports(), false, fmt.Errorf("asset %s and %s have the same platform id %s", presentAsset, assets[i].asset.Name, assets[i].asset.PlatformIds[0])
 			}
 			progressBarElements[assets[i].asset.PlatformIds[0]] = assets[i].asset.Name
 			orderedKeys = append(orderedKeys, assets[i].asset.PlatformIds[0])
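
Note: the sketch below distills the error-aggregation pattern this patch introduces, for readers who want the flow outside of cnquery's codebase. All identifiers in it (scanError, aggregateReporter, connect) are hypothetical stand-ins, not cnquery's actual API. The idea: rather than aborting distributeJob on the first per-asset failure, each failure is recorded next to its asset, folded into the aggregate report, and finally surfaced as a non-zero exit code.

package main

import (
	"errors"
	"fmt"
	"os"
)

// scanError pairs a failed asset with the error it produced,
// mirroring the patch's assetWithError type.
type scanError struct {
	asset string
	err   error
}

// aggregateReporter collects per-asset errors for the final report,
// analogous to AddScanError on the patch's reporter (hypothetical stand-in).
type aggregateReporter struct {
	errs []scanError
}

func (r *aggregateReporter) addScanError(asset string, err error) {
	r.errs = append(r.errs, scanError{asset: asset, err: err})
}

// report returns the collected errors; a real reporter would also
// carry the successful scan results.
func (r *aggregateReporter) report() []scanError {
	return r.errs
}

// connect is a stand-in for runtime creation, connection, and discovery;
// it fails for one asset so the error path is exercised.
func connect(asset string) error {
	if asset == "unreachable-host" {
		return errors.New("unable to connect to asset")
	}
	return nil
}

func main() {
	assets := []string{"localhost", "unreachable-host"}
	reporter := &aggregateReporter{}

	for _, a := range assets {
		if err := connect(a); err != nil {
			// Record the failure and keep scanning the remaining assets,
			// as the patch does with `continue` instead of an early return.
			reporter.addScanError(a, err)
			continue
		}
		fmt.Printf("scanned %s\n", a)
	}

	// Mirror the scan.go change: a report containing errors yields exit code 1.
	if errs := reporter.report(); len(errs) > 0 {
		for _, e := range errs {
			fmt.Fprintf(os.Stderr, "asset %s failed: %v\n", e.asset, e.err)
		}
		os.Exit(1)
	}
}

Run against the two fake assets, the sketch scans "localhost", records the failure for "unreachable-host", prints it to stderr, and exits with status 1, mirroring how the patched scanner keeps scanning healthy assets while still failing the CLI run when any asset errored.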