Storage: Per project image and backup storage options #14582

Draft · wants to merge 6 commits into base: main
4 changes: 4 additions & 0 deletions doc/api-extensions.md
@@ -2535,3 +2535,7 @@ Adds a new {config:option}`device-unix-hotplug-device-conf:ownership.inherit` co
## `unix_device_hotplug_subsystem_device_option`

Adds a new {config:option}`device-unix-hotplug-device-conf:subsystem` configuration option for `unix-hotplug` devices. This adds support for detecting `unix-hotplug` devices by subsystem, and can be used in conjunction with {config:option}`device-unix-hotplug-device-conf:productid` and {config:option}`device-unix-hotplug-device-conf:vendorid`.

## `project_daemon_storage`
This introduces two new configuration keys `storage.images_volume` and `storage.backups_volume` to the project level to allow for a storage volume
Contributor — Suggested change:
Original: This introduces two new configuration keys `storage.images_volume` and `storage.backups_volume` to the project level to allow for a storage volume
Suggested: This introduces two new configuration keys, `storage.images_volume` and `storage.backups_volume`, to the project level. These keys allow for a storage volume

on an existing pool be used for storing the project-wide images and backups artifacts.
Contributor — Suggested change:
Original: on an existing pool be used for storing the project-wide images and backups artifacts.
Suggested: on an existing pool to be used for storing the project-wide images and backups artifacts.

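For readers following along, here is a minimal sketch of how the project-level keys described above could be set through the Go client once this extension is available. It assumes the standard `canonical/lxd` client API; the project name (`demo`), pool (`default`), and volume names are placeholders, and the custom volumes are assumed to already exist on the pool.

```go
package main

import (
	"fmt"
	"log"

	lxd "github.com/canonical/lxd/client"
)

func main() {
	// Connect to the local LXD daemon over the unix socket.
	c, err := lxd.ConnectLXDUnix("", nil)
	if err != nil {
		log.Fatal(err)
	}

	// Fetch the project to reconfigure ("demo" is a placeholder).
	project, etag, err := c.GetProject("demo")
	if err != nil {
		log.Fatal(err)
	}

	put := project.Writable()
	if put.Config == nil {
		put.Config = map[string]string{}
	}

	// Point the project's images and backups at existing custom volumes,
	// using the documented POOL/VOLUME syntax.
	put.Config["storage.images_volume"] = "default/demo-images"
	put.Config["storage.backups_volume"] = "default/demo-backups"

	err = c.UpdateProject("demo", put, etag)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("per-project storage volumes configured")
}
```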
8 changes: 4 additions & 4 deletions lxd/api_1.0.go
@@ -636,14 +636,14 @@ func doAPI10Update(d *Daemon, r *http.Request, req api.ServerPut, patch bool) re

// Validate the storage volumes
if nodeValues["storage.backups_volume"] != nil && nodeValues["storage.backups_volume"] != newNodeConfig.StorageBackupsVolume() {
err := daemonStorageValidate(s, nodeValues["storage.backups_volume"].(string))
err := daemonStorageValidate(s, api.ProjectDefaultName, nodeValues["storage.backups_volume"].(string))
if err != nil {
return fmt.Errorf("Failed validation of %q: %w", "storage.backups_volume", err)
}
}

if nodeValues["storage.images_volume"] != nil && nodeValues["storage.images_volume"] != newNodeConfig.StorageImagesVolume() {
err := daemonStorageValidate(s, nodeValues["storage.images_volume"].(string))
err := daemonStorageValidate(s, api.ProjectDefaultName, nodeValues["storage.images_volume"].(string))
if err != nil {
return fmt.Errorf("Failed validation of %q: %w", "storage.images_volume", err)
}
@@ -939,15 +939,15 @@ func doAPI10UpdateTriggers(d *Daemon, nodeChanged, clusterChanged map[string]str

value, ok = nodeChanged["storage.backups_volume"]
if ok {
err := daemonStorageMove(s, "backups", value)
err := daemonStorageMove(s, "backups", api.ProjectDefaultName, value, true)
if err != nil {
return err
}
}

value, ok = nodeChanged["storage.images_volume"]
if ok {
err := daemonStorageMove(s, "images", value)
err := daemonStorageMove(s, "images", api.ProjectDefaultName, value, true)
if err != nil {
return err
}
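The updated call sites above now pass a project name into `daemonStorageValidate` and `daemonStorageMove`. As a rough, self-contained illustration of the `POOL/VOLUME` shape these values must take, here is a simplified stand-in, not the daemon's implementation; the real helper additionally verifies that the pool and custom volume exist and are usable.

```go
package main

import (
	"fmt"
	"strings"
)

// validateDaemonStorageTarget mimics the basic shape check that a
// "storage.*_volume" value must satisfy: a non-empty pool name and a
// non-empty volume name separated by a single slash.
func validateDaemonStorageTarget(target string) error {
	fields := strings.Split(target, "/")
	if len(fields) != 2 || fields[0] == "" || fields[1] == "" {
		return fmt.Errorf("Invalid syntax %q, expected <pool>/<volume>", target)
	}

	return nil
}

func main() {
	for _, v := range []string{"default/backups-vol", "just-a-volume", "pool//"} {
		fmt.Printf("%q -> %v\n", v, validateDaemonStorageTarget(v))
	}
}
```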
74 changes: 74 additions & 0 deletions lxd/api_project.go
@@ -9,6 +9,9 @@
"net"
"net/http"
"net/url"
"os"
"path/filepath"
"slices"
"strings"

"github.com/gorilla/mux"
@@ -331,6 +334,39 @@
return response.SmartError(fmt.Errorf("Failed creating project %q: %w", project.Name, err))
}

// Create images and backups directories, and symlink to the daemon path by default.
imageDaemonPath := shared.VarPath("images", "daemon", fmt.Sprintf("project_%s", project.Name))
imageSymPath := shared.VarPath("images", fmt.Sprintf("project_%s", project.Name))
err = os.MkdirAll(imageDaemonPath, 0700)

Check failure — Code scanning / CodeQL: Uncontrolled data used in path expression (High). This path depends on a user-provided value.
if err != nil {
return response.InternalError(fmt.Errorf("Failed to create directory %q: %w", imageDaemonPath, err))
}
err = os.Symlink(imageDaemonPath, imageSymPath)

Check failure (reported twice) — Code scanning / CodeQL: Uncontrolled data used in path expression (High). This path depends on a user-provided value.
if err != nil {
return response.InternalError(fmt.Errorf("Failed to create directory %q: %w", imageSymPath, err))
}

backupsDaemonPath := shared.VarPath("backups", "daemon", fmt.Sprintf("project_%s", project.Name))
backupsSymPath := shared.VarPath("backups", fmt.Sprintf("project_%s", project.Name))
backupsCustomPath := filepath.Join(backupsSymPath, "custom")
backupsInstancesPath := filepath.Join(backupsSymPath, "instances")
err = os.MkdirAll(backupsDaemonPath, 0700)

Check failure — Code scanning / CodeQL: Uncontrolled data used in path expression (High). This path depends on a user-provided value.
if err != nil {
return response.InternalError(fmt.Errorf("Failed to create directory %q: %w", backupsDaemonPath, err))
}
err = os.Symlink(backupsDaemonPath, backupsSymPath)

Check failure (reported twice) — Code scanning / CodeQL: Uncontrolled data used in path expression (High). This path depends on a user-provided value.
if err != nil {
return response.InternalError(fmt.Errorf("Failed to create directory %q: %w", backupsSymPath, err))
}
err = os.MkdirAll(backupsCustomPath, 0700)

Check failure — Code scanning / CodeQL: Uncontrolled data used in path expression (High). This path depends on a user-provided value.
if err != nil {
return response.InternalError(fmt.Errorf("Failed to create directory %q: %w", backupsCustomPath, err))
}
err = os.MkdirAll(backupsInstancesPath, 0700)

Check failure — Code scanning / CodeQL: Uncontrolled data used in path expression (High). This path depends on a user-provided value.
if err != nil {
return response.InternalError(fmt.Errorf("Failed to create directory %q: %w", backupsInstancesPath, err))
}

requestor := request.CreateRequestor(r)
lc := lifecycle.ProjectCreated.Event(project.Name, requestor, nil)
s.Events.SendLifecycle(project.Name, lc)
@@ -687,6 +723,30 @@
return response.BadRequest(err)
}

// Validate the storage volumes.
if req.Config["storage.backups_volume"] != "" && req.Config["storage.backups_volume"] != project.Config["storage.backups_volume"] {
err := daemonStorageValidate(s, project.Name, req.Config["storage.backups_volume"])
if err != nil {
return response.SmartError(fmt.Errorf("Failed validation of %q: %w", "storage.backups_volume", err))
}
}

if req.Config["storage.images_volume"] != "" && req.Config["storage.images_volume"] != project.Config["storage.images_volume"] {
err := daemonStorageValidate(s, project.Name, req.Config["storage.images_volume"])
if err != nil {
return response.SmartError(fmt.Errorf("Failed validation of %q: %w", "storage.images_volume", err))
}
}

// Move storage if necessary.
if slices.Contains(configChanged, "storage.backups_volume") {
daemonStorageMove(s, "backups", project.Name, req.Config["storage.backups_volume"], false)

Check failure on line 743 in lxd/api_project.go — GitHub Actions / Code: Error return value is not checked (errcheck).
}

if slices.Contains(configChanged, "storage.images_volume") {
daemonStorageMove(s, "images", project.Name, req.Config["storage.images_volume"], false)

Check failure on line 747 in lxd/api_project.go — GitHub Actions / Code: Error return value is not checked (errcheck).
}

// Update the database entry.
err = s.DB.Cluster.Transaction(context.TODO(), func(ctx context.Context, tx *db.ClusterTx) error {
err := limits.AllowProjectUpdate(s.GlobalConfig, tx, project.Name, req.Config, configChanged)
Expand Down Expand Up @@ -1381,6 +1441,20 @@
// defaultdesc: `block`
// shortdesc: Whether to prevent creating instance or volume snapshots
"restricted.snapshots": isEitherAllowOrBlock,
// lxdmeta:generate(entities=server; group=miscellaneous; key=storage.backups_volume)
// Specify the volume using the syntax `POOL/VOLUME`.
// ---
// type: string
// scope: local
// shortdesc: Volume to use to store backup tarballs
"storage.backups_volume": validate.Optional(),
// lxdmeta:generate(entities=server; group=miscellaneous; key=storage.images_volume)
// Specify the volume using the syntax `POOL/VOLUME`.
// ---
// type: string
// scope: local
// shortdesc: Volume to use to store the image tarballs
"storage.images_volume": validate.Optional(),
}

// Add the storage pool keys.
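Taken together, the new `projectsPost` code above lays out a per-project skeleton under the daemon's var path: the data lives under `images/daemon/project_<name>` and `backups/daemon/project_<name>`, and `images/project_<name>` and `backups/project_<name>` are symlinks to those directories by default, with `custom/` and `instances/` created below the backups path. A hedged sketch that recreates that layout under a temporary directory (the real code uses `shared.VarPath` and takes the project name from the API request):

```go
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
)

func main() {
	varDir, err := os.MkdirTemp("", "lxd-var-")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(varDir)

	projectName := "demo" // placeholder; normally taken from the create request

	// images/daemon/project_<name> holds the data; images/project_<name> points to it.
	imageDaemonPath := filepath.Join(varDir, "images", "daemon", "project_"+projectName)
	imageSymPath := filepath.Join(varDir, "images", "project_"+projectName)

	// backups/daemon/project_<name> likewise, with custom/ and instances/ below the symlink.
	backupsDaemonPath := filepath.Join(varDir, "backups", "daemon", "project_"+projectName)
	backupsSymPath := filepath.Join(varDir, "backups", "project_"+projectName)

	for _, dir := range []string{imageDaemonPath, backupsDaemonPath} {
		if err := os.MkdirAll(dir, 0700); err != nil {
			log.Fatal(err)
		}
	}

	if err := os.Symlink(imageDaemonPath, imageSymPath); err != nil {
		log.Fatal(err)
	}

	if err := os.Symlink(backupsDaemonPath, backupsSymPath); err != nil {
		log.Fatal(err)
	}

	for _, dir := range []string{filepath.Join(backupsSymPath, "custom"), filepath.Join(backupsSymPath, "instances")} {
		if err := os.MkdirAll(dir, 0700); err != nil {
			log.Fatal(err)
		}
	}

	fmt.Println("created per-project storage skeleton under", varDir)
}
```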
10 changes: 5 additions & 5 deletions lxd/backup.go
@@ -101,7 +101,7 @@ func backupCreate(s *state.State, args db.InstanceBackup, sourceInst instance.In
}

// Create the target path if needed.
backupsPath := shared.VarPath("backups", "instances", project.Instance(sourceInst.Project().Name, sourceInst.Name()))
backupsPath := shared.VarPath("backups", fmt.Sprintf("project_%s", op.Project()), "instances", project.Instance(sourceInst.Project().Name, sourceInst.Name()))
if !shared.PathExists(backupsPath) {
err := os.MkdirAll(backupsPath, 0700)
if err != nil {
@@ -111,7 +111,7 @@ func backupCreate(s *state.State, args db.InstanceBackup, sourceInst instance.In
revert.Add(func() { _ = os.Remove(backupsPath) })
}

target := shared.VarPath("backups", "instances", project.Instance(sourceInst.Project().Name, b.Name()))
target := shared.VarPath("backups", fmt.Sprintf("project_%s", op.Project()), "instances", project.Instance(sourceInst.Project().Name, b.Name()))

// Setup the tarball writer.
l.Debug("Opening backup tarball for writing", logger.Ctx{"path": target})
@@ -373,7 +373,7 @@ func pruneExpiredInstanceBackups(ctx context.Context, s *state.State) error {
}

instBackup := backup.NewInstanceBackup(s, inst, b.ID, b.Name, b.CreationDate, b.ExpiryDate, b.InstanceOnly, b.OptimizedStorage)
err = instBackup.Delete()
err = instBackup.Delete(inst.Project().Name)
if err != nil {
return fmt.Errorf("Error deleting instance backup %q: %w", b.Name, err)
}
@@ -437,7 +437,7 @@ func volumeBackupCreate(s *state.State, args db.StoragePoolVolumeBackup, project
}

// Create the target path if needed.
backupsPath := shared.VarPath("backups", "custom", pool.Name(), project.StorageVolume(projectName, volumeName))
backupsPath := shared.VarPath("backups", fmt.Sprintf("project_%s", projectName), "custom", pool.Name(), project.StorageVolume(projectName, volumeName))
if !shared.PathExists(backupsPath) {
err := os.MkdirAll(backupsPath, 0700)
if err != nil {
@@ -447,7 +447,7 @@ func volumeBackupCreate(s *state.State, args db.StoragePoolVolumeBackup, project
revert.Add(func() { _ = os.Remove(backupsPath) })
}

target := shared.VarPath("backups", "custom", pool.Name(), project.StorageVolume(projectName, backupRow.Name))
target := shared.VarPath("backups", fmt.Sprintf("project_%s", projectName), "custom", pool.Name(), project.StorageVolume(projectName, backupRow.Name))

// Setup the tarball writer.
l.Debug("Opening backup tarball for writing", logger.Ctx{"path": target})
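With these changes, backup tarballs move from the shared `backups/instances` and `backups/custom` trees into per-project subtrees. A short sketch of how the new target paths are composed; all names here are placeholders, and the real code builds the `<project>_<name>` segments with the `project` package helpers rather than plain string concatenation:

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	varDir := "/var/lib/lxd" // placeholder for LXD's var path
	projectName := "demo"

	// Instance backup tarball:
	//   backups/project_<project>/instances/<project>_<instance>/<backup>
	instanceBackup := filepath.Join(varDir, "backups", "project_"+projectName,
		"instances", projectName+"_c1", "backup0")

	// Custom volume backup tarball:
	//   backups/project_<project>/custom/<pool>/<project>_<volume>/<backup>
	volumeBackup := filepath.Join(varDir, "backups", "project_"+projectName,
		"custom", "default", projectName+"_vol1", "backup0")

	fmt.Println(instanceBackup)
	fmt.Println(volumeBackup)
}
```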
17 changes: 9 additions & 8 deletions lxd/backup/backup_instance.go
@@ -2,6 +2,7 @@ package backup

import (
"context"
"fmt"
"os"
"strings"
"time"
@@ -58,17 +59,17 @@ func (b *InstanceBackup) Instance() Instance {
}

// Rename renames an instance backup.
func (b *InstanceBackup) Rename(newName string) error {
oldBackupPath := shared.VarPath("backups", "instances", project.Instance(b.instance.Project().Name, b.name))
newBackupPath := shared.VarPath("backups", "instances", project.Instance(b.instance.Project().Name, newName))
func (b *InstanceBackup) Rename(newName string, projectName string) error {
oldBackupPath := shared.VarPath("backups", fmt.Sprintf("project_%s", projectName), "instances", project.Instance(b.instance.Project().Name, b.name))
newBackupPath := shared.VarPath("backups", fmt.Sprintf("project_%s", projectName), "instances", project.Instance(b.instance.Project().Name, newName))

// Extract the old and new parent backup paths from the old and new backup names rather than use
// instance.Name() as this may be in flux if the instance itself is being renamed, whereas the relevant
// instance name is encoded into the backup names.
oldParentName, _, _ := api.GetParentAndSnapshotName(b.name)
oldParentBackupsPath := shared.VarPath("backups", "instances", project.Instance(b.instance.Project().Name, oldParentName))
oldParentBackupsPath := shared.VarPath("backups", fmt.Sprintf("project_%s", projectName), "instances", project.Instance(b.instance.Project().Name, oldParentName))
newParentName, _, _ := api.GetParentAndSnapshotName(newName)
newParentBackupsPath := shared.VarPath("backups", "instances", project.Instance(b.instance.Project().Name, newParentName))
newParentBackupsPath := shared.VarPath("backups", fmt.Sprintf("project_%s", projectName), "instances", project.Instance(b.instance.Project().Name, newParentName))

// Create the new backup path if doesn't exist.
if !shared.PathExists(newParentBackupsPath) {
@@ -108,8 +109,8 @@ func (b *InstanceBackup) Rename(newName string) error {
}

// Delete removes an instance backup.
func (b *InstanceBackup) Delete() error {
backupPath := shared.VarPath("backups", "instances", project.Instance(b.instance.Project().Name, b.name))
func (b *InstanceBackup) Delete(projectName string) error {
backupPath := shared.VarPath("backups", fmt.Sprintf("project_%s", projectName), "instances", project.Instance(b.instance.Project().Name, b.name))

// Delete the on-disk data.
if shared.PathExists(backupPath) {
@@ -120,7 +121,7 @@ func (b *InstanceBackup) Delete() error {
}

// Check if we can remove the instance directory.
backupsPath := shared.VarPath("backups", "instances", project.Instance(b.instance.Project().Name, b.instance.Name()))
backupsPath := shared.VarPath("backups", fmt.Sprintf("project_%s", projectName), "instances", project.Instance(b.instance.Project().Name, b.instance.Name()))
empty, _ := shared.PathIsEmpty(backupsPath)
if empty {
err := os.Remove(backupsPath)
13 changes: 7 additions & 6 deletions lxd/backup/backup_volume.go
@@ -2,6 +2,7 @@ package backup

import (
"context"
"fmt"
"os"
"strings"
"time"
@@ -54,16 +55,16 @@ func (b *VolumeBackup) OptimizedStorage() bool {

// Rename renames a volume backup.
func (b *VolumeBackup) Rename(newName string) error {
oldBackupPath := shared.VarPath("backups", "custom", b.poolName, project.StorageVolume(b.projectName, b.name))
newBackupPath := shared.VarPath("backups", "custom", b.poolName, project.StorageVolume(b.projectName, newName))
oldBackupPath := shared.VarPath("backups", fmt.Sprintf("project_%s", b.projectName), "custom", b.poolName, project.StorageVolume(b.projectName, b.name))
newBackupPath := shared.VarPath("backups", fmt.Sprintf("project_%s", b.projectName), "custom", b.poolName, project.StorageVolume(b.projectName, newName))

// Extract the old and new parent backup paths from the old and new backup names rather than use
// instance.Name() as this may be in flux if the instance itself is being renamed, whereas the relevant
// instance name is encoded into the backup names.
oldParentName, _, _ := api.GetParentAndSnapshotName(b.name)
oldParentBackupsPath := shared.VarPath("backups", "custom", b.poolName, project.StorageVolume(b.projectName, oldParentName))
oldParentBackupsPath := shared.VarPath("backups", fmt.Sprintf("project_%s", b.projectName), "custom", b.poolName, project.StorageVolume(b.projectName, oldParentName))
newParentName, _, _ := api.GetParentAndSnapshotName(newName)
newParentBackupsPath := shared.VarPath("backups", "custom", b.poolName, project.StorageVolume(b.projectName, newParentName))
newParentBackupsPath := shared.VarPath("backups", fmt.Sprintf("project_%s", b.projectName), "custom", b.poolName, project.StorageVolume(b.projectName, newParentName))

revert := revert.New()
defer revert.Fail()
@@ -107,7 +108,7 @@ func (b *VolumeBackup) Rename(newName string) error {

// Delete removes a volume backup.
func (b *VolumeBackup) Delete() error {
backupPath := shared.VarPath("backups", "custom", b.poolName, project.StorageVolume(b.projectName, b.name))
backupPath := shared.VarPath("backups", fmt.Sprintf("project_%s", b.projectName), "custom", b.poolName, project.StorageVolume(b.projectName, b.name))
// Delete the on-disk data.
if shared.PathExists(backupPath) {
err := os.RemoveAll(backupPath)
@@ -117,7 +118,7 @@ func (b *VolumeBackup) Delete() error {
}

// Check if we can remove the volume directory.
backupsPath := shared.VarPath("backups", "custom", b.poolName, project.StorageVolume(b.projectName, b.volumeName))
backupsPath := shared.VarPath("backups", fmt.Sprintf("project_%s", b.projectName), "custom", b.poolName, project.StorageVolume(b.projectName, b.volumeName))
empty, _ := shared.PathIsEmpty(backupsPath)
if empty {
err := os.Remove(backupsPath)
4 changes: 2 additions & 2 deletions lxd/daemon.go
@@ -1506,7 +1506,7 @@ func (d *Daemon) init() error {

// Mount any daemon storage volumes.
logger.Infof("Initializing daemon storage mounts")
err = daemonStorageMount(d.State())
err = daemonStorageMount(d.State(), api.ProjectDefaultName)
if err != nil {
return err
}
@@ -1974,7 +1974,7 @@ func (d *Daemon) Stop(ctx context.Context, sig os.Signal) error {
logger.Info("Stopping daemon storage volumes")
done := make(chan struct{})
go func() {
err := daemonStorageVolumesUnmount(s)
err := daemonStorageVolumesUnmount(s, api.ProjectDefaultName)
if err != nil {
logger.Error("Failed to unmount image and backup volumes", logger.Ctx{"err": err})
}
4 changes: 2 additions & 2 deletions lxd/daemon_images.go
@@ -321,7 +321,7 @@ func ImageDownload(r *http.Request, s *state.State, op *operations.Operation, ar
logger.Info("Downloading image", ctxMap)

// Cleanup any leftover from a past attempt
destDir := shared.VarPath("images")
destDir := shared.VarPath("images", fmt.Sprintf("project_%s", args.ProjectName))
destName := filepath.Join(destDir, fp)

failure := true
@@ -408,7 +408,7 @@ func ImageDownload(r *http.Request, s *state.State, op *operations.Operation, ar
ProgressHandler: progress,
Canceler: canceler,
DeltaSourceRetriever: func(fingerprint string, file string) string {
path := shared.VarPath("images", fmt.Sprintf("%s.%s", fingerprint, file))
path := shared.VarPath("images", fmt.Sprintf("project_%s", args.ProjectName), fmt.Sprintf("%s.%s", fingerprint, file))
if shared.PathExists(path) {
return path
}
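The image download cache follows the same pattern: cached image files, and any delta source files looked up as `<fingerprint>.<file>`, now live under `images/project_<name>` instead of directly under `images/`. A small sketch of the resulting paths, with a placeholder fingerprint and project name:

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	varDir := "/var/lib/lxd"      // placeholder for LXD's var path
	projectName := "demo"         // placeholder project
	fingerprint := "a1b2c3d4e5f6" // placeholder image fingerprint

	// Destination directory for a downloaded image in this project.
	destDir := filepath.Join(varDir, "images", "project_"+projectName)

	// The image file itself and a delta source file (<fingerprint>.<file>).
	imagePath := filepath.Join(destDir, fingerprint)
	deltaSource := filepath.Join(destDir, fmt.Sprintf("%s.%s", fingerprint, "rootfs"))

	fmt.Println(imagePath)
	fmt.Println(deltaSource)
}
```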