From 88b2a2a4d8113873342e474a5e13ef62170b225f Mon Sep 17 00:00:00 2001 From: Mark Bolton Date: Tue, 3 Dec 2024 20:53:40 -0800 Subject: [PATCH 1/6] api: Add project_daemon_storage extension Signed-off-by: Mark Bolton --- doc/api-extensions.md | 4 ++++ shared/version/api.go | 1 + 2 files changed, 5 insertions(+) diff --git a/doc/api-extensions.md b/doc/api-extensions.md index 41c1c8a6560b..90295ab77ae8 100644 --- a/doc/api-extensions.md +++ b/doc/api-extensions.md @@ -2535,3 +2535,7 @@ Adds a new {config:option}`device-unix-hotplug-device-conf:ownership.inherit` co ## `unix_device_hotplug_subsystem_device_option` Adds a new {config:option}`device-unix-hotplug-device-conf:subsystem` configuration option for `unix-hotplug` devices. This adds support for detecting `unix-hotplug` devices by subsystem, and can be used in conjunction with {config:option}`device-unix-hotplug-device-conf:productid` and {config:option}`device-unix-hotplug-device-conf:vendorid`. + +## `project_daemon_storage` +This introduces two new configuration keys `storage.images_volume` and `storage.backups_volume` to the project level to allow for a storage volume +on an existing pool be used for storing the project-wide images and backups artifacts. diff --git a/shared/version/api.go b/shared/version/api.go index 4d84065f601f..806e5b4d2aab 100644 --- a/shared/version/api.go +++ b/shared/version/api.go @@ -427,6 +427,7 @@ var APIExtensions = []string{ "metadata_configuration_scope", "unix_device_hotplug_ownership_inherit", "unix_device_hotplug_subsystem_device_option", + "project_daemon_storage", } // APIExtensionsCount returns the number of available API extensions. From 0e02bf68b02db75a45ea03950cf61e660778227e Mon Sep 17 00:00:00 2001 From: Mark Bolton Date: Tue, 3 Dec 2024 20:54:34 -0800 Subject: [PATCH 2/6] lxd: Make daemon storage able to handle project-level granularity Signed-off-by: Mark Bolton --- lxd/daemon_storage.go | 113 ++++++++++++++++++++++++++++-------------- 1 file changed, 75 insertions(+), 38 deletions(-) diff --git a/lxd/daemon_storage.go b/lxd/daemon_storage.go index ea7bd64535b8..f11b9a029d66 100644 --- a/lxd/daemon_storage.go +++ b/lxd/daemon_storage.go @@ -16,10 +16,9 @@ import ( storagePools "github.com/canonical/lxd/lxd/storage" storageDrivers "github.com/canonical/lxd/lxd/storage/drivers" "github.com/canonical/lxd/shared" - "github.com/canonical/lxd/shared/api" ) -func daemonStorageVolumesUnmount(s *state.State) error { +func daemonStorageVolumesUnmount(s *state.State, projectName string) error { var storageBackups string var storageImages string @@ -51,7 +50,7 @@ func daemonStorageVolumesUnmount(s *state.State) error { } // Mount volume. - _, err = pool.UnmountVolume(api.ProjectDefaultName, volumeName, storageDrivers.VolumeTypeCustom, nil) + _, err = pool.UnmountVolume(projectName, volumeName, storageDrivers.VolumeTypeCustom, nil) if err != nil { return fmt.Errorf("Failed to unmount storage volume %q: %w", source, err) } @@ -76,7 +75,7 @@ func daemonStorageVolumesUnmount(s *state.State) error { return nil } -func daemonStorageMount(s *state.State) error { +func daemonStorageMount(s *state.State, projectName string) error { var storageBackups string var storageImages string err := s.DB.Node.Transaction(context.TODO(), func(ctx context.Context, tx *db.NodeTx) error { @@ -107,7 +106,7 @@ func daemonStorageMount(s *state.State) error { } // Mount volume. 
- _, err = pool.MountVolume(api.ProjectDefaultName, volumeName, storageDrivers.VolumeTypeCustom, nil) + _, err = pool.MountVolume(projectName, volumeName, storageDrivers.VolumeTypeCustom, nil) if err != nil { return fmt.Errorf("Failed to mount storage volume %q: %w", source, err) } @@ -144,7 +143,7 @@ func daemonStorageSplitVolume(volume string) (poolName string, volumeName string return poolName, volumeName, nil } -func daemonStorageValidate(s *state.State, target string) error { +func daemonStorageValidate(s *state.State, projectName string, target string) error { // Check syntax. if target == "" { return nil @@ -166,18 +165,18 @@ func daemonStorageValidate(s *state.State, target string) error { } // Confirm volume exists. - dbVol, err := tx.GetStoragePoolVolume(ctx, poolID, api.ProjectDefaultName, cluster.StoragePoolVolumeTypeCustom, volumeName, true) + dbVol, err := tx.GetStoragePoolVolume(ctx, poolID, projectName, cluster.StoragePoolVolumeTypeCustom, volumeName, true) if err != nil { - return fmt.Errorf("Failed loading storage volume %q in %q project: %w", target, api.ProjectDefaultName, err) + return fmt.Errorf("Failed loading storage volume %q in %q project: %w", target, projectName, err) } if dbVol.ContentType != cluster.StoragePoolVolumeContentTypeNameFS { - return fmt.Errorf("Storage volume %q in %q project is not filesystem content type", target, api.ProjectDefaultName) + return fmt.Errorf("Storage volume %q in %q project is not filesystem content type", target, projectName) } - snapshots, err = tx.GetLocalStoragePoolVolumeSnapshotsWithType(ctx, api.ProjectDefaultName, volumeName, cluster.StoragePoolVolumeTypeCustom, poolID) + snapshots, err = tx.GetLocalStoragePoolVolumeSnapshotsWithType(ctx, projectName, volumeName, cluster.StoragePoolVolumeTypeCustom, poolID) if err != nil { - return fmt.Errorf("Unable to load storage volume snapshots %q in %q project: %w", target, api.ProjectDefaultName, err) + return fmt.Errorf("Unable to load storage volume snapshots %q in %q project: %w", target, projectName, err) } return nil @@ -196,17 +195,17 @@ func daemonStorageValidate(s *state.State, target string) error { } // Mount volume. - _, err = pool.MountVolume(api.ProjectDefaultName, volumeName, storageDrivers.VolumeTypeCustom, nil) + _, err = pool.MountVolume(projectName, volumeName, storageDrivers.VolumeTypeCustom, nil) if err != nil { return fmt.Errorf("Failed to mount storage volume %q: %w", target, err) } defer func() { - _, _ = pool.UnmountVolume(api.ProjectDefaultName, volumeName, storageDrivers.VolumeTypeCustom, nil) + _, _ = pool.UnmountVolume(projectName, volumeName, storageDrivers.VolumeTypeCustom, nil) }() // Validate volume is empty (ignore lost+found). - volStorageName := project.StorageVolume(api.ProjectDefaultName, volumeName) + volStorageName := project.StorageVolume(projectName, volumeName) mountpoint := storageDrivers.GetVolumeMountPath(poolName, storageDrivers.VolumeTypeCustom, volStorageName) entries, err := os.ReadDir(mountpoint) @@ -233,8 +232,11 @@ func daemonStorageValidate(s *state.State, target string) error { return nil } -func daemonStorageMove(s *state.State, storageType string, target string) error { - destPath := shared.VarPath(storageType) +func daemonStorageMove(s *state.State, storageType string, projectName string, target string, isDaemon bool) error { + destPath := shared.VarPath(storageType, "daemon") + if !isDaemon { + destPath = shared.VarPath(storageType, fmt.Sprintf("project_%s", projectName)) + } // Track down the current storage. 
var sourcePool string @@ -285,6 +287,11 @@ func daemonStorageMove(s *state.State, storageType string, target string) error return fmt.Errorf("Failed to delete storage symlink at %q: %w", destPath, err) } + // If unsetting the target for project-level storage selection, move to daemon storage. + if !isDaemon { + destPath = shared.VarPath(storageType, "daemon", fmt.Sprintf("project_%s", projectName)) + } + // Re-create as a directory. err = os.MkdirAll(destPath, 0700) if err != nil { @@ -324,13 +331,13 @@ func daemonStorageMove(s *state.State, storageType string, target string) error } // Mount volume. - _, err = pool.MountVolume(api.ProjectDefaultName, volumeName, storageDrivers.VolumeTypeCustom, nil) + _, err = pool.MountVolume(projectName, volumeName, storageDrivers.VolumeTypeCustom, nil) if err != nil { return fmt.Errorf("Failed to mount storage volume %q: %w", target, err) } // Set ownership & mode. - volStorageName := project.StorageVolume(api.ProjectDefaultName, volumeName) + volStorageName := project.StorageVolume(projectName, volumeName) mountpoint := storageDrivers.GetVolumeMountPath(poolName, storageDrivers.VolumeTypeCustom, volStorageName) destPath = mountpoint @@ -345,17 +352,31 @@ func daemonStorageMove(s *state.State, storageType string, target string) error } // Handle changes. - if sourcePath != shared.VarPath(storageType) { - // Remove the symlink. - err := os.Remove(shared.VarPath(storageType)) - if err != nil { - return fmt.Errorf("Failed to remove the new symlink at %q: %w", shared.VarPath(storageType), err) - } + if !strings.HasPrefix(sourcePath, shared.VarPath(storageType, "daemon")) { + if isDaemon { + // Remove the symlink. + err := os.Remove(shared.VarPath(storageType, "daemon")) + if err != nil { + return fmt.Errorf("Failed to remove the new symlink at %q: %w", shared.VarPath(storageType, "daemon"), err) + } - // Create the new symlink. - err = os.Symlink(destPath, shared.VarPath(storageType)) - if err != nil { - return fmt.Errorf("Failed to create the new symlink at %q: %w", shared.VarPath(storageType), err) + // Create the new symlink. + err = os.Symlink(destPath, shared.VarPath(storageType, "daemon")) + if err != nil { + return fmt.Errorf("Failed to create the new symlink at %q: %w", shared.VarPath(storageType, "daemon"), err) + } + } else { + // Remove the symlink. + err := os.Remove(shared.VarPath(storageType, fmt.Sprintf("project_%s", projectName))) + if err != nil { + return fmt.Errorf("Failed to remove the new symlink at %q: %w", shared.VarPath(storageType, fmt.Sprintf("project_%s", projectName)), err) + } + + // Create the new symlink. + err = os.Symlink(destPath, shared.VarPath(storageType, fmt.Sprintf("project_%s", projectName))) + if err != nil { + return fmt.Errorf("Failed to create the new symlink at %q: %w", shared.VarPath(storageType, fmt.Sprintf("project_%s", projectName)), err) + } } // Move the data across. @@ -379,18 +400,34 @@ func daemonStorageMove(s *state.State, storageType string, target string) error return nil } - sourcePath = shared.VarPath(storageType) + ".temp" + if isDaemon { + sourcePath = shared.VarPath(storageType, "daemon") + ".temp" - // Rename the existing storage. - err = os.Rename(shared.VarPath(storageType), sourcePath) - if err != nil { - return fmt.Errorf("Failed to rename existing storage %q: %w", shared.VarPath(storageType), err) - } + // Rename the existing storage. 
+ err = os.Rename(shared.VarPath(storageType, "daemon"), sourcePath) + if err != nil { + return fmt.Errorf("Failed to rename existing storage %q: %w", shared.VarPath(storageType, "daemon"), err) + } - // Create the new symlink. - err = os.Symlink(destPath, shared.VarPath(storageType)) - if err != nil { - return fmt.Errorf("Failed to create the new symlink at %q: %w", shared.VarPath(storageType), err) + // Create the new symlink. + err = os.Symlink(destPath, shared.VarPath(storageType, "daemon")) + if err != nil { + return fmt.Errorf("Failed to create the new symlink at %q: %w", shared.VarPath(storageType, "daemon"), err) + } + } else { + sourcePath = shared.VarPath(storageType, "daemon") + ".temp" + + // Rename the existing storage. + err = os.Rename(shared.VarPath(storageType, fmt.Sprintf("project_%s", projectName)), sourcePath) + if err != nil { + return fmt.Errorf("Failed to rename existing storage %q: %w", shared.VarPath(storageType, fmt.Sprintf("project_%s", projectName)), err) + } + + // Create the new symlink. + err = os.Symlink(destPath, shared.VarPath(storageType, fmt.Sprintf("project_%s", projectName))) + if err != nil { + return fmt.Errorf("Failed to create the new symlink at %q: %w", shared.VarPath(storageType, fmt.Sprintf("project_%s", projectName)), err) + } } // Move the data across. From b614932284943abfc99e7794beaaac48a325c617 Mon Sep 17 00:00:00 2001 From: Mark Bolton Date: Tue, 3 Dec 2024 20:55:02 -0800 Subject: [PATCH 3/6] lxd: Update use of daemon_storage functions Signed-off-by: Mark Bolton --- lxd/api_1.0.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lxd/api_1.0.go b/lxd/api_1.0.go index 8d162bc04277..554b3d89c284 100644 --- a/lxd/api_1.0.go +++ b/lxd/api_1.0.go @@ -636,14 +636,14 @@ func doAPI10Update(d *Daemon, r *http.Request, req api.ServerPut, patch bool) re // Validate the storage volumes if nodeValues["storage.backups_volume"] != nil && nodeValues["storage.backups_volume"] != newNodeConfig.StorageBackupsVolume() { - err := daemonStorageValidate(s, nodeValues["storage.backups_volume"].(string)) + err := daemonStorageValidate(s, api.ProjectDefaultName, nodeValues["storage.backups_volume"].(string)) if err != nil { return fmt.Errorf("Failed validation of %q: %w", "storage.backups_volume", err) } } if nodeValues["storage.images_volume"] != nil && nodeValues["storage.images_volume"] != newNodeConfig.StorageImagesVolume() { - err := daemonStorageValidate(s, nodeValues["storage.images_volume"].(string)) + err := daemonStorageValidate(s, api.ProjectDefaultName, nodeValues["storage.images_volume"].(string)) if err != nil { return fmt.Errorf("Failed validation of %q: %w", "storage.images_volume", err) } @@ -939,7 +939,7 @@ func doAPI10UpdateTriggers(d *Daemon, nodeChanged, clusterChanged map[string]str value, ok = nodeChanged["storage.backups_volume"] if ok { - err := daemonStorageMove(s, "backups", value) + err := daemonStorageMove(s, "backups", api.ProjectDefaultName, value, true) if err != nil { return err } @@ -947,7 +947,7 @@ func doAPI10UpdateTriggers(d *Daemon, nodeChanged, clusterChanged map[string]str value, ok = nodeChanged["storage.images_volume"] if ok { - err := daemonStorageMove(s, "images", value) + err := daemonStorageMove(s, "images", api.ProjectDefaultName, value, true) if err != nil { return err } From eb7d1797962a63c1dab99f06abc8ddad2c16f444 Mon Sep 17 00:00:00 2001 From: Mark Bolton Date: Tue, 3 Dec 2024 20:55:32 -0800 Subject: [PATCH 4/6] lxd: Allow configuration of project-level images/backups storage Signed-off-by: 
Mark Bolton --- lxd/api_project.go | 74 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) diff --git a/lxd/api_project.go b/lxd/api_project.go index 15ed0568ba49..e3d3dc9928d1 100644 --- a/lxd/api_project.go +++ b/lxd/api_project.go @@ -9,6 +9,9 @@ import ( "net" "net/http" "net/url" + "os" + "path/filepath" + "slices" "strings" "github.com/gorilla/mux" @@ -331,6 +334,39 @@ func projectsPost(d *Daemon, r *http.Request) response.Response { return response.SmartError(fmt.Errorf("Failed creating project %q: %w", project.Name, err)) } + // Create images and backups directories, and symlink to the daemon path by default. + imageDaemonPath := shared.VarPath("images", "daemon", fmt.Sprintf("project_%s", project.Name)) + imageSymPath := shared.VarPath("images", fmt.Sprintf("project_%s", project.Name)) + err = os.MkdirAll(imageDaemonPath, 0700) + if err != nil { + return response.InternalError(fmt.Errorf("Failed to create directory %q: %w", imageDaemonPath, err)) + } + err = os.Symlink(imageDaemonPath, imageSymPath) + if err != nil { + return response.InternalError(fmt.Errorf("Failed to create directory %q: %w", imageSymPath, err)) + } + + backupsDaemonPath := shared.VarPath("backups", "daemon", fmt.Sprintf("project_%s", project.Name)) + backupsSymPath := shared.VarPath("backups", fmt.Sprintf("project_%s", project.Name)) + backupsCustomPath := filepath.Join(backupsSymPath, "custom") + backupsInstancesPath := filepath.Join(backupsSymPath, "instances") + err = os.MkdirAll(backupsDaemonPath, 0700) + if err != nil { + return response.InternalError(fmt.Errorf("Failed to create directory %q: %w", backupsDaemonPath, err)) + } + err = os.Symlink(backupsDaemonPath, backupsSymPath) + if err != nil { + return response.InternalError(fmt.Errorf("Failed to create directory %q: %w", backupsSymPath, err)) + } + err = os.MkdirAll(backupsCustomPath, 0700) + if err != nil { + return response.InternalError(fmt.Errorf("Failed to create directory %q: %w", backupsCustomPath, err)) + } + err = os.MkdirAll(backupsInstancesPath, 0700) + if err != nil { + return response.InternalError(fmt.Errorf("Failed to create directory %q: %w", backupsInstancesPath, err)) + } + requestor := request.CreateRequestor(r) lc := lifecycle.ProjectCreated.Event(project.Name, requestor, nil) s.Events.SendLifecycle(project.Name, lc) @@ -687,6 +723,30 @@ func projectChange(s *state.State, project *api.Project, req api.ProjectPut) res return response.BadRequest(err) } + // Validate the storage volumes. + if req.Config["storage.backups_volume"] != "" && req.Config["storage.backups_volume"] != project.Config["storage.backups_volume"] { + err := daemonStorageValidate(s, project.Name, req.Config["storage.backups_volume"]) + if err != nil { + return response.SmartError(fmt.Errorf("Failed validation of %q: %w", "storage.backups_volume", err)) + } + } + + if req.Config["storage.images_volume"] != "" && req.Config["storage.images_volume"] != project.Config["storage.images_volume"] { + err := daemonStorageValidate(s, project.Name, req.Config["storage.images_volume"]) + if err != nil { + return response.SmartError(fmt.Errorf("Failed validation of %q: %w", "storage.images_volume", err)) + } + } + + // Move storage if necessary. 
+ if slices.Contains(configChanged, "storage.backups_volume") { + daemonStorageMove(s, "backups", project.Name, req.Config["storage.backups_volume"], false) + } + + if slices.Contains(configChanged, "storage.images_volume") { + daemonStorageMove(s, "images", project.Name, req.Config["storage.images_volume"], false) + } + // Update the database entry. err = s.DB.Cluster.Transaction(context.TODO(), func(ctx context.Context, tx *db.ClusterTx) error { err := limits.AllowProjectUpdate(s.GlobalConfig, tx, project.Name, req.Config, configChanged) @@ -1381,6 +1441,20 @@ func projectValidateConfig(s *state.State, config map[string]string) error { // defaultdesc: `block` // shortdesc: Whether to prevent creating instance or volume snapshots "restricted.snapshots": isEitherAllowOrBlock, + // lxdmeta:generate(entities=server; group=miscellaneous; key=storage.backups_volume) + // Specify the volume using the syntax `POOL/VOLUME`. + // --- + // type: string + // scope: local + // shortdesc: Volume to use to store backup tarballs + "storage.backups_volume": validate.Optional(), + // lxdmeta:generate(entities=server; group=miscellaneous; key=storage.images_volume) + // Specify the volume using the syntax `POOL/VOLUME`. + // --- + // type: string + // scope: local + // shortdesc: Volume to use to store the image tarballs + "storage.images_volume": validate.Optional(), } // Add the storage pool keys. From 8cd2b7b7daa5d4839156d79db008b96cacc4ac8b Mon Sep 17 00:00:00 2001 From: Mark Bolton Date: Tue, 3 Dec 2024 20:56:08 -0800 Subject: [PATCH 5/6] lxd/sys: Create new directories for project-level distinction Signed-off-by: Mark Bolton --- lxd/sys/fs.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lxd/sys/fs.go b/lxd/sys/fs.go index a5f4b55b22fb..a35387cdaf0c 100644 --- a/lxd/sys/fs.go +++ b/lxd/sys/fs.go @@ -32,6 +32,7 @@ func (s *OS) initDirs() error { }{ {s.VarDir, 0711}, {filepath.Join(s.VarDir, "backups"), 0700}, + {filepath.Join(s.VarDir, "backups", "daemon"), 0700}, {s.CacheDir, 0700}, // containers is 0711 because liblxc needs to traverse dir to get to each container. 
{filepath.Join(s.VarDir, "containers"), 0711}, @@ -78,8 +79,8 @@ func (s *OS) initStorageDirs() error { path string mode os.FileMode }{ - {filepath.Join(s.VarDir, "backups", "custom"), 0700}, - {filepath.Join(s.VarDir, "backups", "instances"), 0700}, + {filepath.Join(s.VarDir, "backups", "daemon", "custom"), 0700}, + {filepath.Join(s.VarDir, "backups", "daemon", "instances"), 0700}, } for _, dir := range dirs { From d5efb5e231714f70c825a4d53bd9aaf7161d953b Mon Sep 17 00:00:00 2001 From: Mark Bolton Date: Tue, 3 Dec 2024 20:57:07 -0800 Subject: [PATCH 6/6] lxd: Handle new dir struction for images and backups Signed-off-by: Mark Bolton --- lxd/backup.go | 10 ++--- lxd/backup/backup_instance.go | 17 ++++---- lxd/backup/backup_volume.go | 13 +++--- lxd/daemon.go | 4 +- lxd/daemon_images.go | 4 +- lxd/images.go | 68 +++++++++++++++++------------ lxd/instance.go | 2 +- lxd/instance/drivers/driver_lxc.go | 6 +-- lxd/instance/drivers/driver_qemu.go | 6 +-- lxd/instance_backup.go | 6 +-- lxd/lifecycle/instance_backup.go | 4 +- lxd/lifecycle/instance_log.go | 4 +- lxd/storage/backend_lxd.go | 6 +-- lxd/storage_volumes_backup.go | 2 +- 14 files changed, 85 insertions(+), 67 deletions(-) diff --git a/lxd/backup.go b/lxd/backup.go index 3dcd7ca67fc9..c1e95c9ea27c 100644 --- a/lxd/backup.go +++ b/lxd/backup.go @@ -101,7 +101,7 @@ func backupCreate(s *state.State, args db.InstanceBackup, sourceInst instance.In } // Create the target path if needed. - backupsPath := shared.VarPath("backups", "instances", project.Instance(sourceInst.Project().Name, sourceInst.Name())) + backupsPath := shared.VarPath("backups", fmt.Sprintf("project_%s", op.Project()), "instances", project.Instance(sourceInst.Project().Name, sourceInst.Name())) if !shared.PathExists(backupsPath) { err := os.MkdirAll(backupsPath, 0700) if err != nil { @@ -111,7 +111,7 @@ func backupCreate(s *state.State, args db.InstanceBackup, sourceInst instance.In revert.Add(func() { _ = os.Remove(backupsPath) }) } - target := shared.VarPath("backups", "instances", project.Instance(sourceInst.Project().Name, b.Name())) + target := shared.VarPath("backups", fmt.Sprintf("project_%s", op.Project()), "instances", project.Instance(sourceInst.Project().Name, b.Name())) // Setup the tarball writer. l.Debug("Opening backup tarball for writing", logger.Ctx{"path": target}) @@ -373,7 +373,7 @@ func pruneExpiredInstanceBackups(ctx context.Context, s *state.State) error { } instBackup := backup.NewInstanceBackup(s, inst, b.ID, b.Name, b.CreationDate, b.ExpiryDate, b.InstanceOnly, b.OptimizedStorage) - err = instBackup.Delete() + err = instBackup.Delete(inst.Project().Name) if err != nil { return fmt.Errorf("Error deleting instance backup %q: %w", b.Name, err) } @@ -437,7 +437,7 @@ func volumeBackupCreate(s *state.State, args db.StoragePoolVolumeBackup, project } // Create the target path if needed. 
- backupsPath := shared.VarPath("backups", "custom", pool.Name(), project.StorageVolume(projectName, volumeName)) + backupsPath := shared.VarPath("backups", fmt.Sprintf("project_%s", projectName), "custom", pool.Name(), project.StorageVolume(projectName, volumeName)) if !shared.PathExists(backupsPath) { err := os.MkdirAll(backupsPath, 0700) if err != nil { @@ -447,7 +447,7 @@ func volumeBackupCreate(s *state.State, args db.StoragePoolVolumeBackup, project revert.Add(func() { _ = os.Remove(backupsPath) }) } - target := shared.VarPath("backups", "custom", pool.Name(), project.StorageVolume(projectName, backupRow.Name)) + target := shared.VarPath("backups", fmt.Sprintf("project_%s", projectName), "custom", pool.Name(), project.StorageVolume(projectName, backupRow.Name)) // Setup the tarball writer. l.Debug("Opening backup tarball for writing", logger.Ctx{"path": target}) diff --git a/lxd/backup/backup_instance.go b/lxd/backup/backup_instance.go index 62ea6218cf39..3ec9e218e5bb 100644 --- a/lxd/backup/backup_instance.go +++ b/lxd/backup/backup_instance.go @@ -2,6 +2,7 @@ package backup import ( "context" + "fmt" "os" "strings" "time" @@ -58,17 +59,17 @@ func (b *InstanceBackup) Instance() Instance { } // Rename renames an instance backup. -func (b *InstanceBackup) Rename(newName string) error { - oldBackupPath := shared.VarPath("backups", "instances", project.Instance(b.instance.Project().Name, b.name)) - newBackupPath := shared.VarPath("backups", "instances", project.Instance(b.instance.Project().Name, newName)) +func (b *InstanceBackup) Rename(newName string, projectName string) error { + oldBackupPath := shared.VarPath("backups", fmt.Sprintf("project_%s", projectName), "instances", project.Instance(b.instance.Project().Name, b.name)) + newBackupPath := shared.VarPath("backups", fmt.Sprintf("project_%s", projectName), "instances", project.Instance(b.instance.Project().Name, newName)) // Extract the old and new parent backup paths from the old and new backup names rather than use // instance.Name() as this may be in flux if the instance itself is being renamed, whereas the relevant // instance name is encoded into the backup names. oldParentName, _, _ := api.GetParentAndSnapshotName(b.name) - oldParentBackupsPath := shared.VarPath("backups", "instances", project.Instance(b.instance.Project().Name, oldParentName)) + oldParentBackupsPath := shared.VarPath("backups", fmt.Sprintf("project_%s", projectName), "instances", project.Instance(b.instance.Project().Name, oldParentName)) newParentName, _, _ := api.GetParentAndSnapshotName(newName) - newParentBackupsPath := shared.VarPath("backups", "instances", project.Instance(b.instance.Project().Name, newParentName)) + newParentBackupsPath := shared.VarPath("backups", fmt.Sprintf("project_%s", projectName), "instances", project.Instance(b.instance.Project().Name, newParentName)) // Create the new backup path if doesn't exist. if !shared.PathExists(newParentBackupsPath) { @@ -108,8 +109,8 @@ func (b *InstanceBackup) Rename(newName string) error { } // Delete removes an instance backup. -func (b *InstanceBackup) Delete() error { - backupPath := shared.VarPath("backups", "instances", project.Instance(b.instance.Project().Name, b.name)) +func (b *InstanceBackup) Delete(projectName string) error { + backupPath := shared.VarPath("backups", fmt.Sprintf("project_%s", projectName), "instances", project.Instance(b.instance.Project().Name, b.name)) // Delete the on-disk data. 
if shared.PathExists(backupPath) { @@ -120,7 +121,7 @@ func (b *InstanceBackup) Delete() error { } // Check if we can remove the instance directory. - backupsPath := shared.VarPath("backups", "instances", project.Instance(b.instance.Project().Name, b.instance.Name())) + backupsPath := shared.VarPath("backups", fmt.Sprintf("project_%s", projectName), "instances", project.Instance(b.instance.Project().Name, b.instance.Name())) empty, _ := shared.PathIsEmpty(backupsPath) if empty { err := os.Remove(backupsPath) diff --git a/lxd/backup/backup_volume.go b/lxd/backup/backup_volume.go index add6b6bdf758..a0d95113bbdd 100644 --- a/lxd/backup/backup_volume.go +++ b/lxd/backup/backup_volume.go @@ -2,6 +2,7 @@ package backup import ( "context" + "fmt" "os" "strings" "time" @@ -54,16 +55,16 @@ func (b *VolumeBackup) OptimizedStorage() bool { // Rename renames a volume backup. func (b *VolumeBackup) Rename(newName string) error { - oldBackupPath := shared.VarPath("backups", "custom", b.poolName, project.StorageVolume(b.projectName, b.name)) - newBackupPath := shared.VarPath("backups", "custom", b.poolName, project.StorageVolume(b.projectName, newName)) + oldBackupPath := shared.VarPath("backups", fmt.Sprintf("project_%s", b.projectName), "custom", b.poolName, project.StorageVolume(b.projectName, b.name)) + newBackupPath := shared.VarPath("backups", fmt.Sprintf("project_%s", b.projectName), "custom", b.poolName, project.StorageVolume(b.projectName, newName)) // Extract the old and new parent backup paths from the old and new backup names rather than use // instance.Name() as this may be in flux if the instance itself is being renamed, whereas the relevant // instance name is encoded into the backup names. oldParentName, _, _ := api.GetParentAndSnapshotName(b.name) - oldParentBackupsPath := shared.VarPath("backups", "custom", b.poolName, project.StorageVolume(b.projectName, oldParentName)) + oldParentBackupsPath := shared.VarPath("backups", fmt.Sprintf("project_%s", b.projectName), "custom", b.poolName, project.StorageVolume(b.projectName, oldParentName)) newParentName, _, _ := api.GetParentAndSnapshotName(newName) - newParentBackupsPath := shared.VarPath("backups", "custom", b.poolName, project.StorageVolume(b.projectName, newParentName)) + newParentBackupsPath := shared.VarPath("backups", fmt.Sprintf("project_%s", b.projectName), "custom", b.poolName, project.StorageVolume(b.projectName, newParentName)) revert := revert.New() defer revert.Fail() @@ -107,7 +108,7 @@ func (b *VolumeBackup) Rename(newName string) error { // Delete removes a volume backup. func (b *VolumeBackup) Delete() error { - backupPath := shared.VarPath("backups", "custom", b.poolName, project.StorageVolume(b.projectName, b.name)) + backupPath := shared.VarPath("backups", fmt.Sprintf("project_%s", b.projectName), "custom", b.poolName, project.StorageVolume(b.projectName, b.name)) // Delete the on-disk data. if shared.PathExists(backupPath) { err := os.RemoveAll(backupPath) @@ -117,7 +118,7 @@ func (b *VolumeBackup) Delete() error { } // Check if we can remove the volume directory. 
- backupsPath := shared.VarPath("backups", "custom", b.poolName, project.StorageVolume(b.projectName, b.volumeName)) + backupsPath := shared.VarPath("backups", fmt.Sprintf("project_%s", b.projectName), "custom", b.poolName, project.StorageVolume(b.projectName, b.volumeName)) empty, _ := shared.PathIsEmpty(backupsPath) if empty { err := os.Remove(backupsPath) diff --git a/lxd/daemon.go b/lxd/daemon.go index afaf4e8bf93c..17a1ae87ccb9 100644 --- a/lxd/daemon.go +++ b/lxd/daemon.go @@ -1506,7 +1506,7 @@ func (d *Daemon) init() error { // Mount any daemon storage volumes. logger.Infof("Initializing daemon storage mounts") - err = daemonStorageMount(d.State()) + err = daemonStorageMount(d.State(), api.ProjectDefaultName) if err != nil { return err } @@ -1974,7 +1974,7 @@ func (d *Daemon) Stop(ctx context.Context, sig os.Signal) error { logger.Info("Stopping daemon storage volumes") done := make(chan struct{}) go func() { - err := daemonStorageVolumesUnmount(s) + err := daemonStorageVolumesUnmount(s, api.ProjectDefaultName) if err != nil { logger.Error("Failed to unmount image and backup volumes", logger.Ctx{"err": err}) } diff --git a/lxd/daemon_images.go b/lxd/daemon_images.go index ea8fe411e444..09ff379110ec 100644 --- a/lxd/daemon_images.go +++ b/lxd/daemon_images.go @@ -321,7 +321,7 @@ func ImageDownload(r *http.Request, s *state.State, op *operations.Operation, ar logger.Info("Downloading image", ctxMap) // Cleanup any leftover from a past attempt - destDir := shared.VarPath("images") + destDir := shared.VarPath("images", fmt.Sprintf("project_%s", args.ProjectName)) destName := filepath.Join(destDir, fp) failure := true @@ -408,7 +408,7 @@ func ImageDownload(r *http.Request, s *state.State, op *operations.Operation, ar ProgressHandler: progress, Canceler: canceler, DeltaSourceRetriever: func(fingerprint string, file string) string { - path := shared.VarPath("images", fmt.Sprintf("%s.%s", fingerprint, file)) + path := shared.VarPath("images", fmt.Sprintf("project_%s", args.ProjectName), fmt.Sprintf("%s.%s", fingerprint, file)) if shared.PathExists(path) { return path } diff --git a/lxd/images.go b/lxd/images.go index 98b726ecf95b..9620e153f5fb 100644 --- a/lxd/images.go +++ b/lxd/images.go @@ -304,7 +304,7 @@ func compressFile(compress string, infile io.Reader, outfile io.Writer) error { * This function takes a container or snapshot from the local image server and * exports it as an image. 
*/ -func imgPostInstanceInfo(s *state.State, r *http.Request, req api.ImagesPost, op *operations.Operation, builddir string, budget int64) (*api.Image, error) { +func imgPostInstanceInfo(s *state.State, r *http.Request, req api.ImagesPost, op *operations.Operation, builddir string, budget int64, project string) (*api.Image, error) { info := api.Image{} info.Properties = map[string]string{} projectName := request.ProjectParam(r) @@ -492,7 +492,7 @@ func imgPostInstanceInfo(s *state.State, r *http.Request, req api.ImagesPost, op } /* rename the file to the expected name so our caller can use it */ - finalName := shared.VarPath("images", info.Fingerprint) + finalName := shared.VarPath("images", fmt.Sprintf("project_%s", project), info.Fingerprint) err = shared.FileMove(imageFile.Name(), finalName) if err != nil { return nil, err @@ -820,7 +820,7 @@ func getImgPostInfo(s *state.State, r *http.Request, builddir string, project st info.Type = imageType } - imgfname := shared.VarPath("images", info.Fingerprint) + imgfname := shared.VarPath("images", fmt.Sprintf("project_%s", project), info.Fingerprint) err = shared.FileMove(imageTmpFilename, imgfname) if err != nil { l.Error("Failed to move the image tarfile", logger.Ctx{ @@ -831,7 +831,7 @@ func getImgPostInfo(s *state.State, r *http.Request, builddir string, project st } if rootfsTmpFilename != "" { - rootfsfname := shared.VarPath("images", info.Fingerprint+".rootfs") + rootfsfname := shared.VarPath("images", fmt.Sprintf("project_%s", project), info.Fingerprint+".rootfs") err = shared.FileMove(rootfsTmpFilename, rootfsfname) if err != nil { l.Error("Failed to move the rootfs tarfile", logger.Ctx{ @@ -1238,7 +1238,7 @@ func imagesPost(d *Daemon, r *http.Request) response.Response { } else { /* Processing image creation from container */ imagePublishLock.Lock() - info, err = imgPostInstanceInfo(s, r, req, op, builddir, budget) + info, err = imgPostInstanceInfo(s, r, req, op, builddir, budget, projectName) imagePublishLock.Unlock() } } @@ -1934,6 +1934,7 @@ func autoUpdateImages(ctx context.Context, s *state.State) error { var deleteIDs []int var newImage *api.Image + var newImageProject string for _, image := range images { imgProject := image.Project @@ -1970,12 +1971,13 @@ func autoUpdateImages(ctx context.Context, s *state.State) error { // Therefore, we just pick the first. if newImage == nil { newImage = newInfo + newImageProject = imgProject } } if newImage != nil { if len(nodes) > 1 { - err := distributeImage(ctx, s, nodes, fingerprint, newImage) + err := distributeImage(ctx, s, nodes, fingerprint, newImage, newImageProject) if err != nil { logger.Error("Failed to distribute new image", logger.Ctx{"err": err, "fingerprint": newImage.Fingerprint}) @@ -2002,7 +2004,7 @@ func autoUpdateImages(ctx context.Context, s *state.State) error { return nil } -func distributeImage(ctx context.Context, s *state.State, nodes []string, oldFingerprint string, newImage *api.Image) error { +func distributeImage(ctx context.Context, s *state.State, nodes []string, oldFingerprint string, newImage *api.Image, projectName string) error { // Get config of all nodes (incl. own) and check for storage.images_volume. // If the setting is missing, distribute the image to the node. 
// If the option is set, only distribute the image once to nodes with this @@ -2148,8 +2150,8 @@ func distributeImage(ctx context.Context, s *state.State, nodes []string, oldFin } createArgs := &lxd.ImageCreateArgs{} - imageMetaPath := shared.VarPath("images", newImage.Fingerprint) - imageRootfsPath := shared.VarPath("images", newImage.Fingerprint+".rootfs") + imageMetaPath := shared.VarPath("images", fmt.Sprintf("project_%s", projectName), newImage.Fingerprint) + imageRootfsPath := shared.VarPath("images", fmt.Sprintf("project_%s", projectName), newImage.Fingerprint+".rootfs") metaFile, err := os.Open(imageMetaPath) if err != nil { @@ -2442,7 +2444,7 @@ func autoUpdateImage(ctx context.Context, s *state.State, op *operations.Operati } // Remove main image file. - fname := filepath.Join(s.OS.VarDir, "images", fingerprint) + fname := filepath.Join(s.OS.VarDir, "images", fmt.Sprintf("project_%s", projectName), fingerprint) if shared.PathExists(fname) { err = os.Remove(fname) if err != nil { @@ -2451,7 +2453,7 @@ func autoUpdateImage(ctx context.Context, s *state.State, op *operations.Operati } // Remove the rootfs file for the image. - fname = filepath.Join(s.OS.VarDir, "images", fingerprint) + ".rootfs" + fname = filepath.Join(s.OS.VarDir, "images", fmt.Sprintf("project_%s", projectName), fingerprint) + ".rootfs" if shared.PathExists(fname) { err = os.Remove(fname) if err != nil { @@ -2575,14 +2577,22 @@ func pruneLeftoverImages(s *state.State) { // Check and delete leftovers for _, entry := range entries { - fp := strings.Split(entry.Name(), ".")[0] - if !shared.ValueInSlice(fp, images) { - err = os.RemoveAll(shared.VarPath("images", entry.Name())) + if entry.IsDir() { + projectEntries, err := os.ReadDir(shared.VarPath("images", entry.Name())) if err != nil { - return fmt.Errorf("Unable to remove leftover image: %v: %w", entry.Name(), err) + return fmt.Errorf("Unable to list project images directory %s: %w", entry.Name(), err) } - logger.Debugf("Removed leftover image file: %s", entry.Name()) + for _, projectEntry := range projectEntries { + fp := strings.Split(projectEntry.Name(), ".")[0] + if !shared.ValueInSlice(fp, images) { + err = os.RemoveAll(shared.VarPath("images", entry.Name(), projectEntry.Name())) + if err != nil { + return fmt.Errorf("Unable to remove leftover image: %v: %w", projectEntry.Name(), err) + } + logger.Debugf("Removed leftover image file: %s", projectEntry.Name()) + } + } } } @@ -2756,10 +2766,12 @@ func pruneExpiredImages(ctx context.Context, s *state.State, op *operations.Oper } } - // Remove main image file. - err := imageDeleteFromDisk(fingerprint) - if err != nil { - return err + // Remove main image files across projects. + for _, dbImage := range dbImages { + err := imageDeleteFromDisk(dbImage.Project, fingerprint) + if err != nil { + return err + } } logger.Info("Deleted expired cached image files and volumes", logger.Ctx{"fingerprint": fingerprint}) @@ -2920,7 +2932,7 @@ func imageDelete(d *Daemon, r *http.Request) response.Response { } // Remove main image file from disk. - err = imageDeleteFromDisk(details.image.Fingerprint) + err = imageDeleteFromDisk(projectName, details.image.Fingerprint) if err != nil { return err } @@ -2952,9 +2964,9 @@ func imageDelete(d *Daemon, r *http.Request) response.Response { } // imageDeleteFromDisk removes the main image file and rootfs file of an image. -func imageDeleteFromDisk(fingerprint string) error { +func imageDeleteFromDisk(projectName string, fingerprint string) error { // Remove main image file. 
- fname := shared.VarPath("images", fingerprint) + fname := shared.VarPath("images", fmt.Sprintf("project_%s", projectName), fingerprint) if shared.PathExists(fname) { err := os.Remove(fname) if err != nil && !os.IsNotExist(err) { @@ -2963,7 +2975,7 @@ func imageDeleteFromDisk(fingerprint string) error { } // Remove the rootfs file for the image. - fname = shared.VarPath("images", fingerprint) + ".rootfs" + fname = shared.VarPath("images", fmt.Sprintf("project_%s", projectName), fingerprint) + ".rootfs" if shared.PathExists(fname) { err := os.Remove(fname) if err != nil && !os.IsNotExist(err) { @@ -4269,7 +4281,7 @@ func imageExport(d *Daemon, r *http.Request) response.Response { return response.ForwardedResponse(client, r) } - imagePath := shared.VarPath("images", imgInfo.Fingerprint) + imagePath := shared.VarPath("images", fmt.Sprintf("project_%s", projectName), imgInfo.Fingerprint) rootfsPath := imagePath + ".rootfs" _, ext, _, err := shared.DetectCompression(imagePath) @@ -4384,8 +4396,8 @@ func imageExportPost(d *Daemon, r *http.Request) response.Response { run := func(op *operations.Operation) error { createArgs := &lxd.ImageCreateArgs{} - imageMetaPath := shared.VarPath("images", details.imageFingerprintPrefix) - imageRootfsPath := shared.VarPath("images", details.imageFingerprintPrefix+".rootfs") + imageMetaPath := shared.VarPath("images", fmt.Sprintf("project_%s", projectName), details.imageFingerprintPrefix) + imageRootfsPath := shared.VarPath("images", fmt.Sprintf("project_%s", projectName), details.imageFingerprintPrefix+".rootfs") metaFile, err := os.Open(imageMetaPath) if err != nil { @@ -4625,7 +4637,7 @@ func imageRefresh(d *Daemon, r *http.Request) response.Response { if newImage != nil { if len(nodes) > 1 { - err := distributeImage(s.ShutdownCtx, s, nodes, details.imageFingerprintPrefix, newImage) + err := distributeImage(s.ShutdownCtx, s, nodes, details.imageFingerprintPrefix, newImage, projectName) if err != nil { return fmt.Errorf("Failed to distribute new image %q: %w", newImage.Fingerprint, err) } diff --git a/lxd/instance.go b/lxd/instance.go index caf089e636a0..6b242f6ae7d5 100644 --- a/lxd/instance.go +++ b/lxd/instance.go @@ -77,7 +77,7 @@ func instanceImageTransfer(s *state.State, r *http.Request, projectName string, client = client.UseProject(projectName) - err = imageImportFromNode(filepath.Join(s.OS.VarDir, "images"), client, hash) + err = imageImportFromNode(filepath.Join(s.OS.VarDir, "images", fmt.Sprintf("project_%s", projectName)), client, hash) if err != nil { return err } diff --git a/lxd/instance/drivers/driver_lxc.go b/lxd/instance/drivers/driver_lxc.go index 9f996ef8b79d..90dcca030af0 100644 --- a/lxd/instance/drivers/driver_lxc.go +++ b/lxd/instance/drivers/driver_lxc.go @@ -3820,7 +3820,7 @@ func (d *lxc) delete(force bool) error { } for _, backup := range backups { - err = backup.Delete() + err = backup.Delete(d.project.Name) if err != nil { return err } @@ -4006,12 +4006,12 @@ func (d *lxc) Rename(newName string, applyTemplateTrigger bool) error { backupName := strings.Split(oldName, "/")[1] newName := fmt.Sprintf("%s/%s", newName, backupName) - err = b.Rename(newName) + err = b.Rename(newName, d.project.Name) if err != nil { return err } - revert.Add(func() { _ = b.Rename(oldName) }) + revert.Add(func() { _ = b.Rename(oldName, d.project.Name) }) } // Invalidate the go-lxc cache. 
diff --git a/lxd/instance/drivers/driver_qemu.go b/lxd/instance/drivers/driver_qemu.go index 83d9f63ab013..11ca34b3ec26 100644 --- a/lxd/instance/drivers/driver_qemu.go +++ b/lxd/instance/drivers/driver_qemu.go @@ -5307,12 +5307,12 @@ func (d *qemu) Rename(newName string, applyTemplateTrigger bool) error { backupName := strings.Split(oldName, "/")[1] newName := fmt.Sprintf("%s/%s", newName, backupName) - err = b.Rename(newName) + err = b.Rename(newName, d.project.Name) if err != nil { return err } - revert.Add(func() { _ = b.Rename(oldName) }) + revert.Add(func() { _ = b.Rename(oldName, d.project.Name) }) } // Update lease files. @@ -6114,7 +6114,7 @@ func (d *qemu) delete(force bool) error { } for _, backup := range backups { - err = backup.Delete() + err = backup.Delete(d.project.Name) if err != nil { return err } diff --git a/lxd/instance_backup.go b/lxd/instance_backup.go index 3c8a6e5631a9..b0ccf0e87e9c 100644 --- a/lxd/instance_backup.go +++ b/lxd/instance_backup.go @@ -540,7 +540,7 @@ func instanceBackupPost(d *Daemon, r *http.Request) response.Response { newName := name + shared.SnapshotDelimiter + req.Name rename := func(op *operations.Operation) error { - err := backup.Rename(newName) + err := backup.Rename(newName, projectName) if err != nil { return err } @@ -629,7 +629,7 @@ func instanceBackupDelete(d *Daemon, r *http.Request) response.Response { } remove := func(op *operations.Operation) error { - err := backup.Delete() + err := backup.Delete(projectName) if err != nil { return err } @@ -714,7 +714,7 @@ func instanceBackupExportGet(d *Daemon, r *http.Request) response.Response { } ent := response.FileResponseEntry{ - Path: shared.VarPath("backups", "instances", project.Instance(projectName, backup.Name())), + Path: shared.VarPath("backups", fmt.Sprintf("project_%s", projectName), "instances", project.Instance(projectName, backup.Name())), } s.Events.SendLifecycle(projectName, lifecycle.InstanceBackupRetrieved.Event(fullName, backup.Instance(), nil)) diff --git a/lxd/lifecycle/instance_backup.go b/lxd/lifecycle/instance_backup.go index 46f7f7dd560d..1606ee81b171 100644 --- a/lxd/lifecycle/instance_backup.go +++ b/lxd/lifecycle/instance_backup.go @@ -1,6 +1,8 @@ package lifecycle import ( + "fmt" + "github.com/canonical/lxd/shared/api" "github.com/canonical/lxd/shared/version" ) @@ -20,7 +22,7 @@ const ( func (a InstanceBackupAction) Event(fullBackupName string, inst instance, ctx map[string]any) api.EventLifecycle { _, backupName, _ := api.GetParentAndSnapshotName(fullBackupName) - u := api.NewURL().Path(version.APIVersion, "instances", inst.Name(), "backups", backupName).Project(inst.Project().Name) + u := api.NewURL().Path(version.APIVersion, fmt.Sprintf("project_%s", inst.Project().Name), "instances", inst.Name(), "backups", backupName).Project(inst.Project().Name) var requestor *api.EventLifecycleRequestor if inst.Operation() != nil { diff --git a/lxd/lifecycle/instance_log.go b/lxd/lifecycle/instance_log.go index c1f5909c4212..a4a69c86063c 100644 --- a/lxd/lifecycle/instance_log.go +++ b/lxd/lifecycle/instance_log.go @@ -1,6 +1,8 @@ package lifecycle import ( + "fmt" + "github.com/canonical/lxd/shared/api" "github.com/canonical/lxd/shared/version" ) @@ -16,7 +18,7 @@ const ( // Event creates the lifecycle event for an action on an instance log. 
func (a InstanceLogAction) Event(file string, inst instance, requestor *api.EventLifecycleRequestor, ctx map[string]any) api.EventLifecycle { - u := api.NewURL().Path(version.APIVersion, "instances", inst.Name(), "backups", file).Project(inst.Project().Name) + u := api.NewURL().Path(version.APIVersion, fmt.Sprintf("project_%s", inst.Project().Name), "instances", inst.Name(), "backups", file).Project(inst.Project().Name) return api.EventLifecycle{ Action: string(a), diff --git a/lxd/storage/backend_lxd.go b/lxd/storage/backend_lxd.go index 06eeafd1cc59..d71ea8127b36 100644 --- a/lxd/storage/backend_lxd.go +++ b/lxd/storage/backend_lxd.go @@ -1795,7 +1795,7 @@ func (b *lxdBackend) imageFiller(fingerprint string, op *operations.Operation) f }} } - imageFile := shared.VarPath("images", fingerprint) + imageFile := shared.VarPath("images", fmt.Sprintf("project_%s", op.Project()), fingerprint) return ImageUnpack(imageFile, vol, rootBlockPath, b.state.OS, allowUnsafeResize, tracker) } } @@ -2323,7 +2323,7 @@ func (b *lxdBackend) CreateInstanceFromMigration(inst instance.Instance, conn io } // Make sure that the image is available locally too (not guaranteed in clusters). - imageExists = err == nil && shared.PathExists(shared.VarPath("images", fingerprint)) + imageExists = err == nil && shared.PathExists(shared.VarPath("images", fmt.Sprintf("project_%s", projectName), fingerprint)) } if imageExists { @@ -6195,7 +6195,7 @@ func (b *lxdBackend) DeleteCustomVolume(projectName string, volName string, op * } // Remove backups directory for volume. - backupsPath := shared.VarPath("backups", "custom", b.name, project.StorageVolume(projectName, volName)) + backupsPath := shared.VarPath("backups", fmt.Sprintf("project_%s", projectName), "custom", b.name, project.StorageVolume(projectName, volName)) if shared.PathExists(backupsPath) { err := os.RemoveAll(backupsPath) if err != nil { diff --git a/lxd/storage_volumes_backup.go b/lxd/storage_volumes_backup.go index 0e99450e290e..179f9b8d3511 100644 --- a/lxd/storage_volumes_backup.go +++ b/lxd/storage_volumes_backup.go @@ -795,7 +795,7 @@ func storagePoolVolumeTypeCustomBackupExportGet(d *Daemon, r *http.Request) resp } ent := response.FileResponseEntry{ - Path: shared.VarPath("backups", "custom", details.pool.Name(), project.StorageVolume(effectiveProjectName, fullName)), + Path: shared.VarPath("backups", fmt.Sprintf("project_%s", effectiveProjectName), "custom", details.pool.Name(), project.StorageVolume(effectiveProjectName, fullName)), } s.Events.SendLifecycle(effectiveProjectName, lifecycle.StorageVolumeBackupRetrieved.Event(details.pool.Name(), details.volumeTypeName, fullName, effectiveProjectName, request.CreateRequestor(r), nil))