Storage: Add reverter to disk device update & test #14703

Merged
lxd/storage_volumes.go (10 changes: 5 additions & 5 deletions)
@@ -1878,11 +1878,13 @@ func storagePoolVolumeTypePostRename(s *state.State, r *http.Request, poolName s
defer revert.Fail()

// Update devices using the volume in instances and profiles.
- err = storagePoolVolumeUpdateUsers(s, projectName, pool.Name(), vol, pool.Name(), &newVol)
+ cleanup, err := storagePoolVolumeUpdateUsers(s, projectName, pool.Name(), vol, pool.Name(), &newVol)
if err != nil {
return response.SmartError(err)
}

+ revert.Add(cleanup)

// Use an empty operation for this sync response to pass the requestor
op := &operations.Operation{}
op.SetRequestor(r)
@@ -1919,14 +1921,12 @@ func storagePoolVolumeTypePostMove(s *state.State, r *http.Request, poolName str
defer revert.Fail()

// Update devices using the volume in instances and profiles.
- err = storagePoolVolumeUpdateUsers(s, requestProjectName, pool.Name(), vol, newPool.Name(), &newVol)
+ cleanup, err := storagePoolVolumeUpdateUsers(s, requestProjectName, pool.Name(), vol, newPool.Name(), &newVol)
if err != nil {
return err
}

- revert.Add(func() {
- _ = storagePoolVolumeUpdateUsers(s, projectName, newPool.Name(), &newVol, pool.Name(), vol)
- })
+ revert.Add(cleanup)

// Provide empty description and nil config to instruct CreateCustomVolumeFromCopy to copy it
// from source volume.
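Both call sites now follow the same shape: storagePoolVolumeUpdateUsers returns a revert.Hook that restores the old device entries, and the handler registers that hook on its own reverter instead of hand-rolling the reverse call that the deleted closure in storagePoolVolumeTypePostMove used to make. Below is a minimal sketch of the caller-side wiring, assuming the semantics of LXD's shared/revert package; doRename and its argument list are hypothetical stand-ins for the real handler:

import (
	"github.com/canonical/lxd/lxd/state"
	storagePools "github.com/canonical/lxd/lxd/storage"
	"github.com/canonical/lxd/shared/api"
	"github.com/canonical/lxd/shared/revert"
)

// doRename is a trimmed-down, hypothetical caller illustrating the hook hand-off.
func doRename(s *state.State, projectName string, pool storagePools.Pool, vol *api.StorageVolume, newVol api.StorageVolume) error {
	revert := revert.New()
	defer revert.Fail() // Runs every registered hook unless Success() disarms it.

	// Point instance and profile devices at the renamed volume.
	cleanup, err := storagePoolVolumeUpdateUsers(s, projectName, pool.Name(), vol, pool.Name(), &newVol)
	if err != nil {
		return err
	}

	// If anything after this fails, the deferred Fail() runs the hook and
	// the device updates are rolled back.
	revert.Add(cleanup)

	// ... perform the actual volume rename here ...

	revert.Success() // Disarm the reverter; cleanup is never run.
	return nil
}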
lxd/storage_volumes_utils.go (78 changes: 60 additions & 18 deletions)
@@ -12,12 +12,17 @@ import (
storagePools "github.com/canonical/lxd/lxd/storage"
"github.com/canonical/lxd/shared"
"github.com/canonical/lxd/shared/api"
"github.com/canonical/lxd/shared/logger"
"github.com/canonical/lxd/shared/revert"
"github.com/canonical/lxd/shared/version"
)

var supportedVolumeTypes = []int{cluster.StoragePoolVolumeTypeContainer, cluster.StoragePoolVolumeTypeVM, cluster.StoragePoolVolumeTypeCustom, cluster.StoragePoolVolumeTypeImage}

- func storagePoolVolumeUpdateUsers(s *state.State, projectName string, oldPoolName string, oldVol *api.StorageVolume, newPoolName string, newVol *api.StorageVolume) error {
+ func storagePoolVolumeUpdateUsers(s *state.State, projectName string, oldPoolName string, oldVol *api.StorageVolume, newPoolName string, newVol *api.StorageVolume) (revert.Hook, error) {
+ revert := revert.New()
+ defer revert.Fail()

// Update all instances that are using the volume with a local (non-expanded) device.
err := storagePools.VolumeUsedByInstanceDevices(s, oldPoolName, projectName, oldVol, false, func(dbInst db.InstanceArgs, project api.Project, usedByDevices []string) error {
inst, err := instance.Load(s, dbInst, project)
@@ -26,15 +31,17 @@ func storagePoolVolumeUpdateUsers(s *state.State, projectName string, oldPoolNam
}

localDevices := inst.LocalDevices()
+ newDevices := localDevices.Clone()

for _, devName := range usedByDevices {
- _, exists := localDevices[devName]
+ _, exists := newDevices[devName]
if exists {
- localDevices[devName]["pool"] = newPoolName
+ newDevices[devName]["pool"] = newPoolName

if strings.Contains(localDevices[devName]["source"], "/") {
- localDevices[devName]["source"] = newVol.Type + "/" + newVol.Name
+ newDevices[devName]["source"] = newVol.Type + "/" + newVol.Name
} else {
- localDevices[devName]["source"] = newVol.Name
+ newDevices[devName]["source"] = newVol.Name
}
}
}
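The switch from mutating localDevices in place to mutating a newDevices clone is what makes the revert hook added later in this function work: reverting re-applies the original instance definition, so the original device map has to survive unmodified. A toy, self-contained illustration of the aliasing bug that Clone() (and the manual key/value copy used for profiles below) avoids, with plain maps standing in for LXD's device config type:

package main

import "fmt"

func main() {
	// Stand-in for inst.LocalDevices(): device name -> config keys.
	localDevices := map[string]map[string]string{
		"web": {"pool": "data", "source": "web"},
	}

	// Plain assignment aliases the same inner maps, so "updating the copy"
	// silently rewrites the original too and leaves nothing to revert to.
	aliased := localDevices
	aliased["web"]["source"] = "webbaz"
	fmt.Println(localDevices["web"]["source"]) // "webbaz": original lost

	// A per-device deep copy keeps the original intact as revert state.
	localDevices["web"]["source"] = "web" // reset for the demo
	clone := make(map[string]map[string]string, len(localDevices))
	for name, dev := range localDevices {
		clone[name] = make(map[string]string, len(dev))
		for k, v := range dev {
			clone[name][k] = v
		}
	}

	clone["web"]["source"] = "webbaz"
	fmt.Println(localDevices["web"]["source"]) // "web": original preserved
}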
@@ -43,7 +50,7 @@ func storagePoolVolumeUpdateUsers(s *state.State, projectName string, oldPoolNam
Architecture: inst.Architecture(),
Description: inst.Description(),
Config: inst.LocalConfig(),
- Devices: localDevices,
+ Devices: newDevices,
Ephemeral: inst.IsEphemeral(),
Profiles: inst.Profiles(),
Project: inst.Project().Name,
@@ -56,42 +63,77 @@ func storagePoolVolumeUpdateUsers(s *state.State, projectName string, oldPoolNam
return err
}

+ revert.Add(func() {
+ err := inst.Update(dbInst, false)
+ if err != nil {
+ logger.Error("Failed to revert instance update", logger.Ctx{"project": dbInst.Project, "instance": dbInst.Name, "error": err})
+ }
+ })

return nil
})
if err != nil {
- return err
+ return nil, err
}

// Update all profiles that are using the volume with a device.
err = storagePools.VolumeUsedByProfileDevices(s, oldPoolName, projectName, oldVol, func(profileID int64, profile api.Profile, p api.Project, usedByDevices []string) error {
- for name, dev := range profile.Devices {
- if shared.ValueInSlice(name, usedByDevices) {
- dev["pool"] = newPoolName
+ newDevices := make(map[string]map[string]string, len(profile.Devices))

+ for devName, dev := range profile.Devices {
+ for key, val := range dev {
+ _, exists := newDevices[devName]
+ if !exists {
+ newDevices[devName] = make(map[string]string, len(dev))
+ }

+ newDevices[devName][key] = val
+ }

+ if shared.ValueInSlice(devName, usedByDevices) {
+ newDevices[devName]["pool"] = newPoolName

if strings.Contains(dev["source"], "/") {
dev["source"] = newVol.Type + "/" + newVol.Name
newDevices[devName]["source"] = newVol.Type + "/" + newVol.Name
} else {
dev["source"] = newVol.Name
newDevices[devName]["source"] = newVol.Name
}
}
}

- pUpdate := api.ProfilePut{}
- pUpdate.Config = profile.Config
- pUpdate.Description = profile.Description
- pUpdate.Devices = profile.Devices
+ pUpdate := api.ProfilePut{
+ Config: profile.Config,
+ Description: profile.Description,
+ Devices: newDevices,
+ }

err = doProfileUpdate(s, p, profile.Name, profileID, &profile, pUpdate)
if err != nil {
return err
}

+ revert.Add(func() {
+ original := api.ProfilePut{
+ Config: profile.Config,
+ Description: profile.Description,
+ Devices: profile.Devices,
+ }

+ err := doProfileUpdate(s, p, profile.Name, profileID, &profile, original)
+ if err != nil {
+ logger.Error("Failed reverting profile update", logger.Ctx{"project": p.Name, "profile": profile.Name, "error": err})
+ }
+ })

return nil
})
if err != nil {
- return err
+ return nil, err
}

- return nil
+ cleanup := revert.Clone().Fail
+ revert.Success()
+ return cleanup, nil
}

// storagePoolVolumeUsedByGet returns a list of URL resources that use the volume.
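The new tail of storagePoolVolumeUpdateUsers (cleanup := revert.Clone().Fail, then revert.Success()) is the standard LXD idiom for handing revert responsibility to the caller: the clone keeps the registered hooks alive behind the returned cleanup function, while Success() empties the local reverter so the deferred Fail() becomes a no-op on the success path. A stripped-down, runnable sketch of just that hand-off, assuming only the shared/revert API:

package main

import (
	"fmt"

	"github.com/canonical/lxd/shared/revert"
)

// doWork applies a change and returns a hook that undoes it.
func doWork() (revert.Hook, error) {
	r := revert.New()
	defer r.Fail() // Rolls the change back if we return early on error.

	fmt.Println("change applied")
	r.Add(func() { fmt.Println("change reverted") })

	// Clone first: the clone still holds the hook, so its Fail method can
	// run it later on the caller's behalf.
	cleanup := r.Clone().Fail

	// Then disarm the local reverter; the deferred Fail is now a no-op.
	r.Success()

	return cleanup, nil
}

func main() {
	cleanup, err := doWork()
	if err != nil {
		return
	}

	// The caller either runs the hook when a later step fails...
	cleanup() // prints "change reverted"
	// ...or drops it once the whole operation has succeeded.
}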
test/suites/clustering.sh (39 changes: 37 additions & 2 deletions)
@@ -938,12 +938,47 @@ test_clustering_storage() {
! LXD_DIR="${LXD_TWO_DIR}" lxc storage volume rename data web webbaz || false
! LXD_DIR="${LXD_TWO_DIR}" lxc storage volume delete data web || false

- # Specifying the --target parameter shows, renames and deletes the
- # proper volume.
+ LXD_DIR="${LXD_TWO_DIR}" lxc init --empty c1 --target node1
+ LXD_DIR="${LXD_TWO_DIR}" lxc init --empty c2 --target node2
+ LXD_DIR="${LXD_TWO_DIR}" lxc init --empty c3 --target node2

+ LXD_DIR="${LXD_TWO_DIR}" lxc config device add c1 web disk pool=data source=web path=/mnt/web
+ LXD_DIR="${LXD_TWO_DIR}" lxc config device add c2 web disk pool=data source=web path=/mnt/web

+ # Specifying the --target parameter shows the proper volume.
LXD_DIR="${LXD_TWO_DIR}" lxc storage volume show --target node1 data web | grep -q "location: node1"
LXD_DIR="${LXD_TWO_DIR}" lxc storage volume show --target node2 data web | grep -q "location: node2"

+ # rename updates the disk devices that refer to the disk
LXD_DIR="${LXD_TWO_DIR}" lxc storage volume rename --target node1 data web webbaz

[ "$(LXD_DIR=${LXD_TWO_DIR} lxc config device get c1 web source)" = "webbaz" ]
[ "$(LXD_DIR=${LXD_TWO_DIR} lxc config device get c2 web source)" = "web" ]

LXD_DIR="${LXD_TWO_DIR}" lxc storage volume rename --target node2 data web webbaz

[ "$(LXD_DIR=${LXD_TWO_DIR} lxc config device get c1 web source)" = "webbaz" ]
[ "$(LXD_DIR=${LXD_TWO_DIR} lxc config device get c2 web source)" = "webbaz" ]

LXD_DIR="${LXD_TWO_DIR}" lxc config device remove c1 web

+ # renaming a local storage volume when attached via profile fails
+ LXD_DIR="${LXD_TWO_DIR}" lxc profile create stovol-webbaz
+ LXD_DIR="${LXD_TWO_DIR}" lxc profile device add stovol-webbaz webbaz disk pool=data source=webbaz path=/mnt/web

LXD_DIR="${LXD_TWO_DIR}" lxc profile add c3 stovol-webbaz # c2 and c3 both have webbaz attached

! LXD_DIR="${LXD_TWO_DIR}" lxc storage volume rename --target node2 data webbaz webbaz2 || false

[ "$(LXD_DIR=${LXD_TWO_DIR} lxc profile device get stovol-webbaz webbaz source)" = "webbaz" ]
[ "$(LXD_DIR=${LXD_TWO_DIR} lxc config device get c2 web source)" = "webbaz" ]

LXD_DIR="${LXD_TWO_DIR}" lxc profile remove c3 stovol-webbaz
LXD_DIR="${LXD_TWO_DIR}" lxc profile delete stovol-webbaz

+ # clean up
+ LXD_DIR="${LXD_TWO_DIR}" lxc delete c1 c2 c3

LXD_DIR="${LXD_TWO_DIR}" lxc storage volume delete --target node2 data webbaz

# Since now there's only one volume in the pool left named webbaz,