From f9ac865cd5147e1cc7fc7531fb6be52443d56061 Mon Sep 17 00:00:00 2001 From: Preslav Gerchev Date: Wed, 14 Feb 2024 12:13:29 +0200 Subject: [PATCH] =?UTF-8?q?=E2=AD=90=EF=B8=8F=20Allow=20skipping=20setup?= =?UTF-8?q?=20for=20azure=20snapshot=20connection=20(#3298)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Preslav --- providers/azure/config/config.go | 14 ++ .../connection/azureinstancesnapshot/lun.go | 107 +++++++++-- .../azureinstancesnapshot/lun_test.go | 109 ++++++++++- .../azureinstancesnapshot/provider.go | 174 +++++++----------- .../connection/azureinstancesnapshot/setup.go | 129 +++++++++++++ .../azureinstancesnapshot/snapshot.go | 28 ++- providers/azure/provider/provider.go | 18 +- 7 files changed, 444 insertions(+), 135 deletions(-) create mode 100644 providers/azure/connection/azureinstancesnapshot/setup.go diff --git a/providers/azure/config/config.go b/providers/azure/config/config.go index 356c4f08f6..ec26c0fbfb 100644 --- a/providers/azure/config/config.go +++ b/providers/azure/config/config.go @@ -104,6 +104,20 @@ var Config = plugin.Provider{ Desc: "If set, no cleanup will be performed for the snapshot connection.", Option: plugin.FlagOption_Hidden, }, + { + Long: "skip-snapshot-setup", + Type: plugin.FlagType_Bool, + Default: "", + Desc: "If set, no setup will be performed for the snapshot connection. It is expected that the target's disk is already attached. Use together with --lun.", + Option: plugin.FlagOption_Hidden, + }, + { + Long: "lun", + Type: plugin.FlagType_Int, + Default: "", + Desc: "The logical unit number of the attached disk that should be scanned. Use together with --skip-snapshot-setup.", + Option: plugin.FlagOption_Hidden, + }, }, }, }, diff --git a/providers/azure/connection/azureinstancesnapshot/lun.go b/providers/azure/connection/azureinstancesnapshot/lun.go index 78fbe72a9c..c788b1f4a3 100644 --- a/providers/azure/connection/azureinstancesnapshot/lun.go +++ b/providers/azure/connection/azureinstancesnapshot/lun.go @@ -4,6 +4,7 @@ package azureinstancesnapshot import ( + "encoding/json" "fmt" "io" "strconv" @@ -11,19 +12,34 @@ import ( "github.com/cockroachdb/errors" "github.com/rs/zerolog/log" - "go.mondoo.com/cnquery/v10/providers/os/connection/local" ) -type deviceInfo struct { - // the LUN number, e.g. 3 +type scsiDeviceInfo struct { + // the LUN, e.g. 3 Lun int32 // where the disk is mounted, e.g. /dev/sda VolumePath string } -func (a *azureScannerInstance) getAvailableLun(mountedDevices []deviceInfo) (int32, error) { +type scsiDevices = []scsiDeviceInfo + +// TODO: we should combine this with the OS-connection blockDevices struct +type blockDevices struct { + BlockDevices []blockDevice `json:"blockDevices,omitempty"` +} + +type blockDevice struct { + Name string `json:"name,omitempty"` + FsType string `json:"fstype,omitempty"` + Label string `json:"label,omitempty"` + Uuid string `json:"uuid,omitempty"` + Mountpoint []string `json:"mountpoints,omitempty"` + Children []blockDevice `json:"children,omitempty"` +} + +func getAvailableLun(scsiDevices scsiDevices) (int32, error) { takenLuns := []int32{} - for _, d := range mountedDevices { + for _, d := range scsiDevices { takenLuns = append(takenLuns, d.Lun) } @@ -52,8 +68,8 @@ func (a *azureScannerInstance) getAvailableLun(mountedDevices []deviceInfo) (int // https://learn.microsoft.com/en-us/azure/virtual-machines/linux/azure-to-guest-disk-mapping // for more information. 
we want to find the LUNs of the data disks and their mount location
-func getMountedDevices(localConn *local.LocalConnection) ([]deviceInfo, error) {
-	cmd, err := localConn.RunCommand("lsscsi --brief")
+func (c *AzureSnapshotConnection) listScsiDevices() ([]scsiDeviceInfo, error) {
+	cmd, err := c.localConn.RunCommand("lsscsi --brief")
 	if err != nil {
 		return nil, err
 	}
@@ -72,21 +88,84 @@ func getMountedDevices(localConn *local.LocalConnection) ([]deviceInfo, error) {
 	return parseLsscsiOutput(output)
 }
 
-func getMatchingDevice(mountedDevices []deviceInfo, lun int32) (deviceInfo, error) {
-	for _, d := range mountedDevices {
+// https://learn.microsoft.com/en-us/azure/virtual-machines/linux/azure-to-guest-disk-mapping
+// for more information. we want to find the block devices and where they are mounted
+func (c *AzureSnapshotConnection) listBlockDevices() (*blockDevices, error) {
+	cmd, err := c.localConn.RunCommand("lsblk -f --json")
+	if err != nil {
+		return nil, err
+	}
+	if cmd.ExitStatus != 0 {
+		outErr, err := io.ReadAll(cmd.Stderr)
+		if err != nil {
+			return nil, err
+		}
+		return nil, fmt.Errorf("failed to list block devices: %s", outErr)
+	}
+	data, err := io.ReadAll(cmd.Stdout)
+	if err != nil {
+		return nil, err
+	}
+	blockEntries := &blockDevices{}
+	if err := json.Unmarshal(data, blockEntries); err != nil {
+		return nil, err
+	}
+	return blockEntries, nil
+}
+
+func filterScsiDevices(scsiDevices scsiDevices, lun int32) []scsiDeviceInfo {
+	matching := []scsiDeviceInfo{}
+	for _, d := range scsiDevices {
 		if d.Lun == lun {
-			return d, nil
+			matching = append(matching, d)
 		}
 	}
-	return deviceInfo{}, errors.New("could not find matching device")
+
+	return matching
+}
+
+// there can be multiple devices mounted at the same LUN. the Azure API only provides
+// the LUN, so we need to find all the block devices mounted at that LUN. then we find the first one
+// that has no mounted partitions and use that as the target device. 
this is a best-effort approach +func findMatchingDeviceByBlock(scsiDevices scsiDevices, blockDevices *blockDevices) (string, error) { + matchingBlocks := []blockDevice{} + for _, device := range scsiDevices { + for _, block := range blockDevices.BlockDevices { + devName := "/dev/" + block.Name + if devName == device.VolumePath { + matchingBlocks = append(matchingBlocks, block) + } + } + } + + if len(matchingBlocks) == 0 { + return "", errors.New("no matching blocks found") + } + + var target string + for _, b := range matchingBlocks { + log.Debug().Str("name", b.Name).Msg("azure snapshot> checking block") + mounted := false + for _, ch := range b.Children { + if len(ch.Mountpoint) > 0 && ch.Mountpoint[0] != "" { + log.Debug().Str("name", ch.Name).Msg("azure snapshot> has mounted partitons, skipping") + mounted = true + } + if !mounted { + target = "/dev/" + b.Name + } + } + } + + return target, nil } // parses the output from running 'lsscsi --brief' and gets the device info, the output looks like this: // [0:0:0:0] /dev/sda // [1:0:0:0] /dev/sdb -func parseLsscsiOutput(output string) ([]deviceInfo, error) { +func parseLsscsiOutput(output string) (scsiDevices, error) { lines := strings.Split(strings.TrimSpace(output), "\n") - mountedDevices := []deviceInfo{} + mountedDevices := []scsiDeviceInfo{} for _, line := range lines { log.Debug().Str("line", line).Msg("azure snapshot> parsing lsscsi output") if line == "" { @@ -107,7 +186,7 @@ func parseLsscsiOutput(output string) ([]deviceInfo, error) { if err != nil { return nil, err } - mountedDevices = append(mountedDevices, deviceInfo{Lun: int32(lunInt), VolumePath: path}) + mountedDevices = append(mountedDevices, scsiDeviceInfo{Lun: int32(lunInt), VolumePath: path}) } return mountedDevices, nil diff --git a/providers/azure/connection/azureinstancesnapshot/lun_test.go b/providers/azure/connection/azureinstancesnapshot/lun_test.go index 5f2d65a0f1..f71617c006 100644 --- a/providers/azure/connection/azureinstancesnapshot/lun_test.go +++ b/providers/azure/connection/azureinstancesnapshot/lun_test.go @@ -21,7 +21,7 @@ func TestParseLsscsiOutput(t *testing.T) { devices, err := parseLsscsiOutput(output) assert.NoError(t, err) assert.Len(t, devices, 4) - expected := []deviceInfo{ + expected := scsiDevices{ {Lun: 0, VolumePath: "/dev/sda"}, {Lun: 1, VolumePath: "/dev/sdb"}, {Lun: 2, VolumePath: "/dev/sdc"}, @@ -29,3 +29,110 @@ func TestParseLsscsiOutput(t *testing.T) { } assert.ElementsMatch(t, expected, devices) } + +func TestFilterScsiDevices(t *testing.T) { + devices := scsiDevices{ + {Lun: 0, VolumePath: "/dev/sda"}, + {Lun: 1, VolumePath: "/dev/sdb"}, + {Lun: 2, VolumePath: "/dev/sdc"}, + {Lun: 3, VolumePath: "/dev/sdd"}, + } + + filtered := filterScsiDevices(devices, int32(1)) + expected := scsiDevices{ + {Lun: 1, VolumePath: "/dev/sdb"}, + } + assert.ElementsMatch(t, expected, filtered) + + filtered = filterScsiDevices(devices, int32(4)) + assert.Len(t, filtered, 0) +} + +func TestFindDeviceByBlock(t *testing.T) { + devices := scsiDevices{ + {Lun: 0, VolumePath: "/dev/sda"}, + {Lun: 0, VolumePath: "/dev/sdb"}, + } + t.Run("find device by block", func(t *testing.T) { + blockDevices := &blockDevices{ + BlockDevices: []blockDevice{ + { + Name: "sda", + Children: []blockDevice{ + { + Name: "sda1", + Mountpoint: []string{"/"}, + }, + }, + }, + { + Name: "sdb", + Children: []blockDevice{ + { + Name: "sdb1", + Mountpoint: []string{""}, + }, + }, + }, + }, + } + target, err := findMatchingDeviceByBlock(devices, blockDevices) + assert.NoError(t, err) + 
expected := "/dev/sdb" + assert.Equal(t, expected, target) + }) + + t.Run("no matches", func(t *testing.T) { + blockDevices := &blockDevices{ + BlockDevices: []blockDevice{ + { + Name: "sdc", + Children: []blockDevice{ + { + Name: "sdc1", + Mountpoint: []string{"/"}, + }, + }, + }, + { + Name: "sdc", + Children: []blockDevice{ + { + Name: "sdc1", + Mountpoint: []string{"/tmp"}, + }, + }, + }, + }, + } + _, err := findMatchingDeviceByBlock(devices, blockDevices) + assert.Error(t, err) + }) + t.Run("empty target as all blocks are mounted", func(t *testing.T) { + blockDevices := &blockDevices{ + BlockDevices: []blockDevice{ + { + Name: "sda", + Children: []blockDevice{ + { + Name: "sda1", + Mountpoint: []string{"/"}, + }, + }, + }, + { + Name: "sdb", + Children: []blockDevice{ + { + Name: "sdb1", + Mountpoint: []string{"/tmp"}, + }, + }, + }, + }, + } + target, err := findMatchingDeviceByBlock(devices, blockDevices) + assert.NoError(t, err) + assert.Empty(t, target) + }) +} diff --git a/providers/azure/connection/azureinstancesnapshot/provider.go b/providers/azure/connection/azureinstancesnapshot/provider.go index ccbbd197ee..d9660c25ca 100644 --- a/providers/azure/connection/azureinstancesnapshot/provider.go +++ b/providers/azure/connection/azureinstancesnapshot/provider.go @@ -4,8 +4,7 @@ package azureinstancesnapshot import ( - "fmt" - "time" + "strconv" "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" @@ -30,6 +29,8 @@ const ( SnapshotTargetType string = "snapshot" InstanceTargetType string = "instance" SkipCleanup string = "skip-snapshot-cleanup" + SkipSetup string = "skip-snapshot-setup" + Lun string = "lun" ) // the instance from which we're performing the scan @@ -37,16 +38,25 @@ type azureScannerInstance struct { instanceInfo } +type assetInfo struct { + assetName string + platformId string +} + type scanTarget struct { - TargetType string - Target string - ResourceGroup string + TargetType string + Target string + ResourceGroup string + SubscriptionId string } type mountInfo struct { deviceName string - diskId string - diskName string +} + +type mountedDiskInfo struct { + diskId string + diskName string } func determineScannerInstanceInfo(localConn *local.LocalConnection, token azcore.TokenCredential) (*azureScannerInstance, error) { @@ -101,15 +111,17 @@ func ParseTarget(conf *inventory.Config, scanner *azureScannerInstance) (scanTar if err != nil { log.Debug().Msg("could not parse target as resource id, assuming it's only the resource name") return scanTarget{ - TargetType: conf.Options["type"], - Target: conf.Options["target"], - ResourceGroup: scanner.resourceGroup, + TargetType: conf.Options["type"], + Target: conf.Options["target"], + ResourceGroup: scanner.resourceGroup, + SubscriptionId: scanner.subscriptionId, }, nil } return scanTarget{ - TargetType: conf.Options["type"], - Target: id.Name, - ResourceGroup: id.ResourceGroupName, + TargetType: conf.Options["type"], + Target: id.Name, + ResourceGroup: id.ResourceGroupName, + SubscriptionId: id.SubscriptionID, }, nil } @@ -122,20 +134,21 @@ func NewAzureSnapshotConnection(id uint32, conf *inventory.Config, asset *invent if err != nil { return nil, err } + // local connection is required here to run lsscsi and lsblk to identify where the mounted disk is localConn := local.NewConnection(id, conf, asset) - // check if we run on an azure instance + // 1. 
check if we run on an azure instance scanner, err := determineScannerInstanceInfo(localConn, token) if err != nil { return nil, err } + // 2. determine the target target, err := ParseTarget(conf, scanner) if err != nil { return nil, err } - // determine the target sc, err := NewSnapshotCreator(token, scanner.subscriptionId) if err != nil { return nil, err @@ -146,102 +159,56 @@ func NewAzureSnapshotConnection(id uint32, conf *inventory.Config, asset *invent snapshotCreator: sc, scanner: *scanner, identifier: conf.PlatformId, + localConn: localConn, } - // setup disk image so and attach it to the instance - mi := mountInfo{} - - diskName := "cnspec-" + target.TargetType + "-snapshot-" + time.Now().Format("2006-01-02t15-04-05z00-00") - switch target.TargetType { - case InstanceTargetType: - instanceInfo, err := sc.instanceInfo(target.ResourceGroup, target.Target) + var lun int32 + // 3. we either clone the target disk/snapshot and mount it + // or we skip the setup and expect the disk to be already attached + if !c.skipSetup() { + scsiDevices, err := c.listScsiDevices() if err != nil { + c.Close() return nil, err } - if instanceInfo.bootDiskId == "" { - return nil, fmt.Errorf("could not find boot disk for instance %s", target.Target) - } - - log.Debug().Str("boot disk", instanceInfo.bootDiskId).Msg("found boot disk for instance, cloning") - disk, err := sc.cloneDisk(instanceInfo.bootDiskId, scanner.resourceGroup, diskName, scanner.location, scanner.vm.Zones) - if err != nil { - log.Error().Err(err).Msg("could not complete disk cloning") - return nil, errors.Wrap(err, "could not complete disk cloning") - } - log.Debug().Str("disk", *disk.ID).Msg("cloned disk from instance boot disk") - mi.diskId = *disk.ID - mi.diskName = *disk.Name - asset.Name = instanceInfo.instanceName - conf.PlatformId = azcompute.MondooAzureInstanceID(*instanceInfo.vm.ID) - case SnapshotTargetType: - snapshotInfo, err := sc.snapshotInfo(target.ResourceGroup, target.Target) + lun, err = getAvailableLun(scsiDevices) if err != nil { + c.Close() return nil, err } - - disk, err := sc.createSnapshotDisk(snapshotInfo.snapshotId, scanner.resourceGroup, diskName, scanner.location, scanner.vm.Zones) - if err != nil { - log.Error().Err(err).Msg("could not complete snapshot disk creation") - return nil, errors.Wrap(err, "could not create disk from snapshot") - } - log.Debug().Str("disk", *disk.ID).Msg("created disk from snapshot") - mi.diskId = *disk.ID - mi.diskName = *disk.Name - asset.Name = target.Target - conf.PlatformId = SnapshotPlatformMrn(snapshotInfo.snapshotId) - case DiskTargetType: - diskInfo, err := sc.diskInfo(target.ResourceGroup, target.Target) + diskInfo, ai, err := c.setupDiskAndMount(target, lun) if err != nil { + c.Close() return nil, err } - - disk, err := sc.cloneDisk(diskInfo.diskId, scanner.resourceGroup, diskName, scanner.location, scanner.vm.Zones) + asset.Name = ai.assetName + conf.PlatformId = ai.platformId + c.mountedDiskInfo = diskInfo + } else { + log.Debug().Msg("skipping snapshot setup, expect that disk is already attached") + if c.opts[Lun] == "" { + return nil, errors.New("lun is required to hint where the target disk is located") + } + lunOpt, err := strconv.Atoi(c.opts[Lun]) if err != nil { - log.Error().Err(err).Msg("could not complete disk cloning") - return nil, errors.Wrap(err, "could not complete disk cloning") + return nil, errors.Wrap(err, "could not parse lun") } - log.Debug().Str("disk", *disk.ID).Msg("cloned disk from target disk") - mi.diskId = *disk.ID - mi.diskName = *disk.Name - 
asset.Name = diskInfo.diskName - conf.PlatformId = DiskPlatformMrn(diskInfo.diskId) - default: - return nil, errors.New("invalid target type") - } - - // fetch the mounted devices. we want to find an available LUN to mount the disk at - mountedDevices, err := getMountedDevices(localConn) - if err != nil { - return nil, err - } - lun, err := scanner.getAvailableLun(mountedDevices) - if err != nil { - return nil, err - } - err = sc.attachDisk(scanner.instanceInfo, mi.diskName, mi.diskId, lun) - if err != nil { - c.Close() - return nil, err + lun = int32(lunOpt) + asset.Name = target.Target } - // refetch the mounted devices, we now are looking for the specific LUN that we just attached. - // we don't know from the Azure API where it will be mounted, we need to look it up - mountedDevices, err = getMountedDevices(localConn) + // 4. once mounted (either by the connection or from the outside), identify the disk by the provided LUN + mi, err := c.identifyDisk(lun) if err != nil { c.Close() return nil, err } - matchingDevice, err := getMatchingDevice(mountedDevices, lun) - if err != nil { - c.Close() - return nil, err - } - mi.deviceName = matchingDevice.VolumePath + c.mountInfo = mi - // mount volume + // 5. mount volume shell := []string{"sh", "-c"} volumeMounter := snapshot.NewVolumeMounter(shell) - volumeMounter.VolumeAttachmentLoc = mi.deviceName + volumeMounter.VolumeAttachmentLoc = c.mountInfo.deviceName err = volumeMounter.Mount() if err != nil { log.Error().Err(err).Msg("unable to complete mount step") @@ -264,7 +231,6 @@ func NewAzureSnapshotConnection(id uint32, conf *inventory.Config, asset *invent } c.FileSystemConnection = fsConn - c.mountInfo = mi c.volumeMounter = volumeMounter var ok bool @@ -287,7 +253,11 @@ type AzureSnapshotConnection struct { snapshotCreator *snapshotCreator scanner azureScannerInstance mountInfo mountInfo + // only set if the connection mounts the disk. 
used for cleanup + mountedDiskInfo mountedDiskInfo identifier string + // used on the target VM to run commands, related to finding the target disk by LUN + localConn *local.LocalConnection } func (c *AzureSnapshotConnection) Close() { @@ -296,30 +266,24 @@ func (c *AzureSnapshotConnection) Close() { return } - if c.opts != nil { - if c.opts[snapshot.NoSetup] == "true" { - return - } - } - if c.volumeMounter != nil { err := c.volumeMounter.UnmountVolumeFromInstance() if err != nil { log.Error().Err(err).Msg("unable to unmount volume") } } - if c.skipCleanup() { + if c.skipDiskCleanup() { log.Debug().Msgf("skipping azure snapshot cleanup, %s flag is set to true", SkipCleanup) } else if c.snapshotCreator != nil { - if c.mountInfo.diskName != "" { - err := c.snapshotCreator.detachDisk(c.mountInfo.diskName, c.scanner.instanceInfo) + if c.mountedDiskInfo.diskName != "" { + err := c.snapshotCreator.detachDisk(c.mountedDiskInfo.diskName, c.scanner.instanceInfo) if err != nil { log.Error().Err(err).Msg("unable to detach volume") } } - if c.mountInfo.diskName != "" { - err := c.snapshotCreator.deleteCreatedDisk(c.scanner.resourceGroup, c.mountInfo.diskName) + if c.mountedDiskInfo.diskName != "" { + err := c.snapshotCreator.deleteCreatedDisk(c.scanner.resourceGroup, c.mountedDiskInfo.diskName) if err != nil { log.Error().Err(err).Msg("could not delete created disk") } @@ -334,10 +298,14 @@ func (c *AzureSnapshotConnection) Close() { } } -func (c *AzureSnapshotConnection) skipCleanup() bool { +func (c *AzureSnapshotConnection) skipDiskCleanup() bool { return c.opts[SkipCleanup] == "true" } +func (c *AzureSnapshotConnection) skipSetup() bool { + return c.opts[SkipSetup] == "true" +} + func (c *AzureSnapshotConnection) Kind() string { return "api" } @@ -356,4 +324,4 @@ func (c *AzureSnapshotConnection) Type() shared.ConnectionType { func (c *AzureSnapshotConnection) Config() *inventory.Config { return c.FileSystemConnection.Conf -} +} \ No newline at end of file diff --git a/providers/azure/connection/azureinstancesnapshot/setup.go b/providers/azure/connection/azureinstancesnapshot/setup.go new file mode 100644 index 0000000000..f35434d413 --- /dev/null +++ b/providers/azure/connection/azureinstancesnapshot/setup.go @@ -0,0 +1,129 @@ +// Copyright (c) Mondoo, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package azureinstancesnapshot + +import ( + "crypto/sha256" + "fmt" + "time" + + "github.com/cockroachdb/errors" + "github.com/rs/zerolog/log" + "go.mondoo.com/cnquery/v10/providers/os/id/azcompute" +) + +func (c *AzureSnapshotConnection) identifyDisk(lun int32) (mountInfo, error) { + scsiDevices, err := c.listScsiDevices() + if err != nil { + return mountInfo{}, err + } + + // only interested in the scsi devices that match the provided LUN + filteredScsiDevices := filterScsiDevices(scsiDevices, lun) + if len(filteredScsiDevices) == 0 { + return mountInfo{}, errors.New("no matching scsi devices found") + } + + // if we have exactly one device present at the LUN we can directly point the volume mounter towards it + if len(filteredScsiDevices) == 1 { + return mountInfo{deviceName: filteredScsiDevices[0].VolumePath}, nil + } + + blockDevices, err := c.listBlockDevices() + if err != nil { + return mountInfo{}, err + } + target, err := findMatchingDeviceByBlock(filteredScsiDevices, blockDevices) + if err != nil { + return mountInfo{}, err + } + return mountInfo{deviceName: target}, nil +} + +func (c *AzureSnapshotConnection) setupDiskAndMount(target scanTarget, lun int32) (mountedDiskInfo, assetInfo, error) { + mi, ai, err := c.setupDisk(target) + if err != nil { + return mountedDiskInfo{}, assetInfo{}, err + } + err = c.snapshotCreator.attachDisk(c.scanner.instanceInfo, mi.diskName, mi.diskId, lun) + if err != nil { + return mountedDiskInfo{}, assetInfo{}, err + } + + return mi, ai, nil +} + +func (c *AzureSnapshotConnection) setupDisk(target scanTarget) (mountedDiskInfo, assetInfo, error) { + mi := mountedDiskInfo{} + ai := assetInfo{} + h := sha256.New() + now := time.Now() + // ensure no name collisions if performing multiple snapshot scans at once + h.Write([]byte(target.Target)) + h.Write([]byte(target.TargetType)) + h.Write([]byte(target.ResourceGroup)) + h.Write([]byte(target.SubscriptionId)) + h.Write([]byte(now.Format("2006-01-02t15-04-05z00-00"))) + + diskHash := fmt.Sprintf("%x", h.Sum(nil)) + diskName := fmt.Sprintf("mondoo-snapshot-%s-%s", diskHash[:8], now.Format("2006-01-02t15-04-05z00-00")) + switch target.TargetType { + case InstanceTargetType: + instanceInfo, err := c.snapshotCreator.instanceInfo(target.ResourceGroup, target.Target) + if err != nil { + return mountedDiskInfo{}, assetInfo{}, err + } + if instanceInfo.bootDiskId == "" { + return mountedDiskInfo{}, assetInfo{}, fmt.Errorf("could not find boot disk for instance %s", target.Target) + } + + log.Debug().Str("boot disk", instanceInfo.bootDiskId).Msg("found boot disk for instance, cloning") + disk, err := c.snapshotCreator.cloneDisk(instanceInfo.bootDiskId, c.scanner.resourceGroup, diskName, c.scanner.location, c.scanner.vm.Zones) + if err != nil { + log.Error().Err(err).Msg("could not complete disk cloning") + return mountedDiskInfo{}, assetInfo{}, errors.Wrap(err, "could not complete disk cloning") + } + log.Debug().Str("disk", *disk.ID).Msg("cloned disk from instance boot disk") + mi.diskId = *disk.ID + mi.diskName = *disk.Name + ai.assetName = instanceInfo.instanceName + ai.platformId = azcompute.MondooAzureInstanceID(*instanceInfo.vm.ID) + case SnapshotTargetType: + snapshotInfo, err := c.snapshotCreator.snapshotInfo(target.ResourceGroup, target.Target) + if err != nil { + return mountedDiskInfo{}, assetInfo{}, err + } + + disk, err := c.snapshotCreator.createSnapshotDisk(snapshotInfo.snapshotId, c.scanner.resourceGroup, diskName, c.scanner.location, c.scanner.vm.Zones) + 
if err != nil { + log.Error().Err(err).Msg("could not complete snapshot disk creation") + return mountedDiskInfo{}, assetInfo{}, errors.Wrap(err, "could not create disk from snapshot") + } + log.Debug().Str("disk", *disk.ID).Msg("created disk from snapshot") + mi.diskId = *disk.ID + mi.diskName = *disk.Name + ai.assetName = target.Target + ai.platformId = SnapshotPlatformMrn(snapshotInfo.snapshotId) + case DiskTargetType: + diskInfo, err := c.snapshotCreator.diskInfo(target.ResourceGroup, target.Target) + if err != nil { + return mountedDiskInfo{}, assetInfo{}, err + } + + disk, err := c.snapshotCreator.cloneDisk(diskInfo.diskId, c.scanner.resourceGroup, diskName, c.scanner.location, c.scanner.vm.Zones) + if err != nil { + log.Error().Err(err).Msg("could not complete disk cloning") + return mountedDiskInfo{}, assetInfo{}, errors.Wrap(err, "could not complete disk cloning") + } + log.Debug().Str("disk", *disk.ID).Msg("cloned disk from target disk") + mi.diskId = *disk.ID + mi.diskName = *disk.Name + ai.assetName = diskInfo.diskName + ai.platformId = DiskPlatformMrn(diskInfo.diskId) + default: + return mountedDiskInfo{}, assetInfo{}, errors.New("invalid target type") + } + + return mi, ai, nil +} diff --git a/providers/azure/connection/azureinstancesnapshot/snapshot.go b/providers/azure/connection/azureinstancesnapshot/snapshot.go index ef77201a69..14342b52b1 100644 --- a/providers/azure/connection/azureinstancesnapshot/snapshot.go +++ b/providers/azure/connection/azureinstancesnapshot/snapshot.go @@ -12,6 +12,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" compute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute" ) @@ -213,42 +214,40 @@ func (sc *snapshotCreator) attachDisk(targetInstance instanceInfo, diskName, dis if err != nil { return err } - attachOpt := compute.DiskCreateOptionTypesAttach - deleteOpt := compute.DiskDeleteOptionTypesDelete // the Azure API requires all disks to be specified, even the already attached ones. 
// we simply attach the new disk to the end of the already present list of data disks disks := targetInstance.vm.Properties.StorageProfile.DataDisks disks = append(disks, &compute.DataDisk{ Name: &diskName, - CreateOption: &attachOpt, - DeleteOption: &deleteOpt, + CreateOption: to.Ptr(compute.DiskCreateOptionTypesAttach), + DeleteOption: to.Ptr(compute.DiskDeleteOptionTypesDelete), + Caching: to.Ptr(compute.CachingTypesNone), Lun: &lun, ManagedDisk: &compute.ManagedDiskParameters{ ID: &diskId, }, }) - vm := compute.VirtualMachine{ - Location: &targetInstance.location, + props := targetInstance.vm.Properties + props.StorageProfile.DataDisks = disks + vm := compute.VirtualMachineUpdate{ Properties: &compute.VirtualMachineProperties{ - StorageProfile: &compute.StorageProfile{ - DataDisks: disks, - }, + StorageProfile: props.StorageProfile, }, } - poller, err := computeSvc.BeginCreateOrUpdate(ctx, targetInstance.resourceGroup, targetInstance.instanceName, vm, &compute.VirtualMachinesClientBeginCreateOrUpdateOptions{}) + poller, err := computeSvc.BeginUpdate(ctx, targetInstance.resourceGroup, targetInstance.instanceName, vm, &compute.VirtualMachinesClientBeginUpdateOptions{}) if err != nil { return err } start := time.Now() for { log.Debug().Str("disk-name", diskName).Str("elapsed", time.Duration(time.Since(start)).String()).Msg("polling for disk attach") - _, err := poller.Poll(ctx) + _, err = poller.Poll(ctx) if err != nil { return err } - if poller.Done() { + log.Debug().Str("disk-name", diskName).Msg("poller done") break } time.Sleep(5 * time.Second) @@ -268,8 +267,7 @@ func (sc *snapshotCreator) detachDisk(diskName string, targetInstance instanceIn // we stored the disks as they were before attaching the new one in the targetInstance. // we simply use that list which will result in the new disk being detached - vm := compute.VirtualMachine{ - Location: &targetInstance.location, + vm := compute.VirtualMachineUpdate{ Properties: &compute.VirtualMachineProperties{ StorageProfile: &compute.StorageProfile{ DataDisks: targetInstance.vm.Properties.StorageProfile.DataDisks, @@ -277,7 +275,7 @@ func (sc *snapshotCreator) detachDisk(diskName string, targetInstance instanceIn }, } - poller, err := computeSvc.BeginCreateOrUpdate(ctx, targetInstance.resourceGroup, targetInstance.instanceName, vm, &compute.VirtualMachinesClientBeginCreateOrUpdateOptions{}) + poller, err := computeSvc.BeginUpdate(ctx, targetInstance.resourceGroup, targetInstance.instanceName, vm, &compute.VirtualMachinesClientBeginUpdateOptions{}) if err != nil { return err } diff --git a/providers/azure/provider/provider.go b/providers/azure/provider/provider.go index c53becb3dd..f480f5b9c3 100644 --- a/providers/azure/provider/provider.go +++ b/providers/azure/provider/provider.go @@ -5,6 +5,7 @@ package provider import ( "errors" + "fmt" "go.mondoo.com/cnquery/v10/llx" "go.mondoo.com/cnquery/v10/providers-sdk/v1/inventory" @@ -43,6 +44,8 @@ func (s *Service) ParseCLI(req *plugin.ParseCLIReq) (*plugin.ParseCLIRes, error) certificatePath := flags["certificate-path"] certificateSecret := flags["certificate-secret"] skipSnapshotCleanup := flags["skip-snapshot-cleanup"] + skipSnapshotSetup := flags["skip-snapshot-setup"] + lun := flags["lun"] opts := map[string]string{} creds := []*vault.Credential{} @@ -57,8 +60,20 @@ func (s *Service) ParseCLI(req *plugin.ParseCLIReq) (*plugin.ParseCLIRes, error) if len(subscriptionsToExclude.Value) > 0 { opts["subscriptions-exclude"] = string(subscriptionsToExclude.Value) } + if len(lun.Value) > 0 { + 
opts[azureinstancesnapshot.Lun] = fmt.Sprint(lun.RawData().Value.(int64))
+	}
 	// the presence of the flag indicates that we should skip cleanup
-	if present := skipSnapshotCleanup.RawData().Value.(bool); present {
+	if skipCleanup := skipSnapshotCleanup.RawData().Value.(bool); skipCleanup {
+		opts[azureinstancesnapshot.SkipCleanup] = "true"
+	}
+	// the presence of the flag indicates that we should skip setup. the disk we're trying to scan
+	// is already attached. rely on the lun parameter as a hint to the disk's location
+	if skipSetup := skipSnapshotSetup.RawData().Value.(bool); skipSetup {
+		opts[azureinstancesnapshot.SkipSetup] = "true"
+		// we cannot detach the disk if we didn't attach it,
+		// and we cannot delete the disk as we do not know its azure resource id.
+		// explicitly set the skip-cleanup flag to true for clarity
 		opts[azureinstancesnapshot.SkipCleanup] = "true"
 	}
 	if len(clientSecret.Value) > 0 {
@@ -221,7 +236,6 @@ func (s *Service) connect(req *plugin.ConnectReq, callback plugin.ProviderCallba
 }
 
 func (s *Service) detect(asset *inventory.Asset, conn shared.AzureConnection) error {
-	return nil
 }
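
Usage sketch for the two new hidden flags, which are meant to be used together when the target disk is attached to the scanner VM out of band. The flag names and option keys come from this patch; the exact scan subcommand spelling below is an assumption and may differ in the released CLI:

    # disk to scan is already attached to the scanner VM at LUN 3;
    # skip the clone/attach setup and the cleanup, only locate and mount the device
    cnquery scan azure compute snapshot my-snapshot --skip-snapshot-setup --lun 3

With these flags, ParseCLI sets the connection options to skip-snapshot-setup="true", lun="3", and (implicitly) skip-snapshot-cleanup="true", so NewAzureSnapshotConnection bypasses setupDiskAndMount and only runs identifyDisk(lun) before mounting the volume.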