Per-pool project limits (from Incus) #14078

Merged: 11 commits, Sep 12, 2024
4 changes: 4 additions & 0 deletions doc/api-extensions.md
@@ -2464,3 +2464,7 @@ Adds the following internal metrics:

* Total completed requests
* Number of ongoing requests

## `projects_limits_disk_pool`

This introduces per-pool project disk limits, adding a `limits.disk.pool.NAME` configuration option to the project limits.
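
As a quick illustration (not part of this change set), the new key can be set like any other project limit through the Go client; the project name, pool name, and size below are placeholders, and per the `HiddenStoragePools` logic further down, a value of `0` hides the pool from the project entirely.

```go
package main

import (
	"log"

	lxd "github.com/canonical/lxd/client"
)

func main() {
	// Connect to the local LXD daemon over the unix socket ("" = default path).
	c, err := lxd.ConnectLXDUnix("", nil)
	if err != nil {
		log.Fatal(err)
	}

	// Fetch the project so its config can be modified (placeholder name).
	project, etag, err := c.GetProject("my-project")
	if err != nil {
		log.Fatal(err)
	}

	put := project.Writable()
	if put.Config == nil {
		put.Config = map[string]string{}
	}

	// Cap the project's aggregate disk usage on the "default" pool at 50GiB.
	// Setting the value to "0" would instead hide the pool from the project.
	put.Config["limits.disk.pool.default"] = "50GiB"

	err = c.UpdateProject("my-project", put, etag)
	if err != nil {
		log.Fatal(err)
	}
}
```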
10 changes: 10 additions & 0 deletions doc/metadata.txt
@@ -1,5 +1,15 @@
// Code generated by lxd-metadata; DO NOT EDIT.

<!-- config group project-limits start -->
```{config:option} limits.disk.pool.POOL_NAME project-limits
:shortdesc: "Maximum disk space used by the project on this pool"
:type: "string"
This value is the maximum value of the aggregate disk
space used by all instance volumes, custom volumes, and images of the
project on this specific storage pool.
```

<!-- config group project-limits end -->
<!-- config group cluster-cluster start -->
```{config:option} scheduler.instance cluster-cluster
:defaultdesc: "`all`"
Expand Down
15 changes: 12 additions & 3 deletions lxc/project.go
@@ -5,6 +5,7 @@ import (
"fmt"
"io"
"os"
"slices"
"sort"
"strings"

@@ -959,23 +960,31 @@ func (c *cmdProjectInfo) run(cmd *cobra.Command, args []string) error {
byteLimits := []string{"disk", "memory"}
data := [][]string{}
for k, v := range projectState.Resources {
shortKey := strings.SplitN(k, ".", 2)[0]

limit := i18n.G("UNLIMITED")
if v.Limit >= 0 {
if shared.ValueInSlice(k, byteLimits) {
if slices.Contains(byteLimits, shortKey) {
limit = units.GetByteSizeStringIEC(v.Limit, 2)
} else {
limit = fmt.Sprintf("%d", v.Limit)
}
}

usage := ""
if shared.ValueInSlice(k, byteLimits) {
if slices.Contains(byteLimits, shortKey) {
usage = units.GetByteSizeStringIEC(v.Usage, 2)
} else {
usage = fmt.Sprintf("%d", v.Usage)
}

data = append(data, []string{strings.ToUpper(k), limit, usage})
columnName := strings.ToUpper(k)
fields := strings.SplitN(columnName, ".", 2)
if len(fields) == 2 {
columnName = fmt.Sprintf("%s (%s)", fields[0], fields[1])
}

data = append(data, []string{columnName, limit, usage})
}

sort.Sort(cli.SortColumnsNaturally(data))
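
For reference, a small standalone sketch (not LXD code) of what the new column naming in `lxc project info` produces: a per-pool resource key such as `disk.default` is rendered as `DISK (DEFAULT)`, while plain keys are shown as before.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Mirrors the column-name logic added above: split on the first "."
	// and show the pool part in parentheses.
	for _, k := range []string{"disk", "memory", "disk.default"} {
		columnName := strings.ToUpper(k)

		fields := strings.SplitN(columnName, ".", 2)
		if len(fields) == 2 {
			columnName = fmt.Sprintf("%s (%s)", fields[0], fields[1])
		}

		fmt.Println(columnName) // DISK, MEMORY, DISK (DEFAULT)
	}
}
```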
28 changes: 28 additions & 0 deletions lxd/api_project.go
@@ -1380,6 +1380,34 @@ func projectValidateConfig(s *state.State, config map[string]string) error {
"restricted.snapshots": isEitherAllowOrBlock,
}

// Add the storage pool keys.
err := s.DB.Cluster.Transaction(context.TODO(), func(ctx context.Context, tx *db.ClusterTx) error {
var err error

// Load all the pools.
pools, err := tx.GetStoragePoolNames(ctx)
if err != nil {
return err
}

// Add the storage-pool specific config keys.
for _, poolName := range pools {
// lxdmeta:generate(entity=project, group=limits, key=limits.disk.pool.POOL_NAME)
// This value is the maximum value of the aggregate disk
// space used by all instance volumes, custom volumes, and images of the
// project on this specific storage pool.
// ---
// type: string
// shortdesc: Maximum disk space used by the project on this pool
projectConfigKeys[fmt.Sprintf("limits.disk.pool.%s", poolName)] = validate.Optional(validate.IsSize)
}

return nil
})
if err != nil {
return fmt.Errorf("Failed loading storage pool names: %w", err)
}

for k, v := range config {
key := k

13 changes: 13 additions & 0 deletions lxd/metadata/configuration.json
@@ -1,5 +1,18 @@
{
"configs": {
"": {
"limits,": {
"keys": [
{
"limits.disk.pool.POOL_NAME": {
"longdesc": "This value is the maximum value of the aggregate disk\nspace used by all instance volumes, custom volumes, and images of the\nproject on this specific storage pool.",
"shortdesc": "Maximum disk space used by the project on this pool",
"type": "string"
}
}
]
}
},
"cluster": {
"cluster": {
"keys": [
111 changes: 100 additions & 11 deletions lxd/project/limits/permissions.go
@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"net/http"
"slices"
"strconv"
"strings"

@@ -23,6 +24,36 @@ import (
"github.com/canonical/lxd/shared/validate"
)

// projectLimitDiskPool is the prefix used for pool-specific disk limits.
var projectLimitDiskPool = "limits.disk.pool."

// HiddenStoragePools returns a list of storage pools that should be hidden from users of the project.
func HiddenStoragePools(ctx context.Context, tx *db.ClusterTx, projectName string) ([]string, error) {
dbProject, err := cluster.GetProject(ctx, tx.Tx(), projectName)
if err != nil {
return nil, fmt.Errorf("Failed getting project: %w", err)
}

project, err := dbProject.ToAPI(ctx, tx.Tx())
if err != nil {
return nil, err
}

hiddenPools := []string{}
for k, v := range project.Config {
if !strings.HasPrefix(k, projectLimitDiskPool) || v != "0" {
continue
}

fields := strings.SplitN(k, projectLimitDiskPool, 2)
if len(fields) == 2 {
hiddenPools = append(hiddenPools, fields[1])
}
}

return hiddenPools, nil
}

// AllowInstanceCreation returns an error if any project-specific limit or
// restriction is violated when creating a new instance.
func AllowInstanceCreation(globalConfig *clusterConfig.Config, tx *db.ClusterTx, projectName string, req api.InstancesPost) error {
@@ -234,7 +265,7 @@ func checkRestrictionsOnVolatileConfig(project api.Project, instanceType instanc

// AllowVolumeCreation returns an error if any project-specific limit or
// restriction is violated when creating a new custom volume in a project.
func AllowVolumeCreation(globalConfig *clusterConfig.Config, tx *db.ClusterTx, projectName string, req api.StorageVolumesPost) error {
func AllowVolumeCreation(globalConfig *clusterConfig.Config, tx *db.ClusterTx, projectName string, poolName string, req api.StorageVolumesPost) error {
var globalConfigDump map[string]any
if globalConfig != nil {
globalConfigDump = globalConfig.Dump()
@@ -256,8 +287,9 @@ func AllowVolumeCreation(globalConfig *clusterConfig.Config, tx *db.ClusterTx, p

// Add the volume being created.
info.Volumes = append(info.Volumes, db.StorageVolumeArgs{
Name: req.Name,
Config: req.Config,
Name: req.Name,
Config: req.Config,
PoolName: poolName,
})

err = checkRestrictionsAndAggregateLimits(globalConfig, tx, info)
@@ -329,8 +361,9 @@ func checkRestrictionsAndAggregateLimits(globalConfig *clusterConfig.Config, tx
// across all project instances.
aggregateKeys := []string{}
isRestricted := false

for key, value := range info.Project.Config {
if shared.ValueInSlice(key, allAggregateLimits) {
if slices.Contains(allAggregateLimits, key) || strings.HasPrefix(key, projectLimitDiskPool) {
aggregateKeys = append(aggregateKeys, key)
continue
}
@@ -388,7 +421,14 @@ func getAggregateLimits(info *projectInfo, aggregateKeys []string) (map[string]a
max := int64(-1)
limit := info.Project.Config[key]
if limit != "" {
parser := aggregateLimitConfigValueParsers[key]
keyName := key

// Handle pool-specific limits.
if strings.HasPrefix(key, projectLimitDiskPool) {
keyName = "limits.disk"
}

parser := aggregateLimitConfigValueParsers[keyName]
max, err = parser(info.Project.Config[key])
if err != nil {
return nil, err
@@ -417,7 +457,14 @@ func checkAggregateLimits(info *projectInfo, aggregateKeys []string) error {
}

for _, key := range aggregateKeys {
parser := aggregateLimitConfigValueParsers[key]
keyName := key

// Handle pool-specific limits.
if strings.HasPrefix(key, projectLimitDiskPool) {
keyName = "limits.disk"
}

parser := aggregateLimitConfigValueParsers[keyName]
max, err := parser(info.Project.Config[key])
if err != nil {
return err
@@ -427,6 +474,7 @@ func checkAggregateLimits(info *projectInfo, aggregateKeys []string) error {
return fmt.Errorf("Reached maximum aggregate value %q for %q in project %q", info.Project.Config[key], key, info.Project.Name)
}
}

return nil
}

@@ -1125,15 +1173,29 @@ func validateAggregateLimit(totals map[string]int64, key, value string) error {
return nil
}

parser := aggregateLimitConfigValueParsers[key]
keyName := key

// Handle pool-specific limits.
if strings.HasPrefix(key, projectLimitDiskPool) {
keyName = "limits.disk"
}

parser := aggregateLimitConfigValueParsers[keyName]
limit, err := parser(value)
if err != nil {
return fmt.Errorf("Invalid value %q for limit %q: %w", value, key, err)
}

total := totals[key]
if limit < total {
printer := aggregateLimitConfigValuePrinters[key]
keyName := key

// Handle pool-specific limits.
if strings.HasPrefix(key, projectLimitDiskPool) {
keyName = "limits.disk"
}

printer := aggregateLimitConfigValuePrinters[keyName]
return fmt.Errorf("%q is too low: current total is %q", key, printer(total))
}

@@ -1287,8 +1349,18 @@ func getTotalsAcrossProjectEntities(info *projectInfo, keys []string, skipUnset

for _, key := range keys {
totals[key] = 0
if key == "limits.disk" {
if key == "limits.disk" || strings.HasPrefix(key, projectLimitDiskPool) {
poolName := ""
fields := strings.SplitN(key, projectLimitDiskPool, 2)
if len(fields) == 2 {
poolName = fields[1]
}

for _, volume := range info.Volumes {
if poolName != "" && volume.PoolName != poolName {
continue
}

value, ok := volume.Config["size"]
if !ok {
if skipUnset {
@@ -1329,14 +1401,31 @@ func getInstanceLimits(instance api.Instance, keys []string, skipUnset bool, sto

for _, key := range keys {
var limit int64
parser := aggregateLimitConfigValueParsers[key]
keyName := key

// Handle pool-specific limits.
if strings.HasPrefix(key, projectLimitDiskPool) {
keyName = "limits.disk"
}

parser := aggregateLimitConfigValueParsers[keyName]

if key == "limits.disk" || strings.HasPrefix(key, projectLimitDiskPool) {
poolName := ""
fields := strings.SplitN(key, projectLimitDiskPool, 2)
if len(fields) == 2 {
poolName = fields[1]
}

if key == "limits.disk" {
_, device, err := instancetype.GetRootDiskDevice(instance.Devices)
if err != nil {
return nil, fmt.Errorf("Failed getting root disk device for instance %q in project %q: %w", instance.Name, instance.Project, err)
}

if poolName != "" && device["pool"] != poolName {
continue
}

value, ok := device["size"]
if !ok || value == "" {
if skipUnset {
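
The same prefix check is repeated in `getAggregateLimits`, `checkAggregateLimits`, `validateAggregateLimit`, and `getInstanceLimits` above: a pool-specific key reuses the parser and printer registered for the generic `limits.disk` key. A minimal standalone sketch of that mapping (illustrative only; the helper name is not from the PR):

```go
package main

import (
	"fmt"
	"strings"
)

// projectLimitDiskPool mirrors the prefix defined in permissions.go.
const projectLimitDiskPool = "limits.disk.pool."

// effectiveLimitKey returns the key under which a limit's parser/printer is
// looked up: pool-specific disk limits fall back to the generic "limits.disk".
func effectiveLimitKey(key string) string {
	if strings.HasPrefix(key, projectLimitDiskPool) {
		return "limits.disk"
	}

	return key
}

func main() {
	fmt.Println(effectiveLimitKey("limits.disk.pool.default")) // limits.disk
	fmt.Println(effectiveLimitKey("limits.memory"))            // limits.memory
}
```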
18 changes: 18 additions & 0 deletions lxd/project/limits/state.go
@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"strconv"
"strings"

"github.com/canonical/lxd/lxd/db"
"github.com/canonical/lxd/lxd/instance/instancetype"
@@ -29,6 +30,16 @@ func GetCurrentAllocations(globalConfig map[string]any, ctx context.Context, tx
return nil, err
}

// Get per-pool limits.
poolLimits := []string{}
for k := range info.Project.Config {
if strings.HasPrefix(k, projectLimitDiskPool) {
poolLimits = append(poolLimits, k)
}
}

allAggregateLimits := append(allAggregateLimits, poolLimits...)

// Get the instance aggregated values.
raw, err := getAggregateLimits(info, allAggregateLimits)
if err != nil {
@@ -41,6 +52,13 @@ 
result["networks"] = raw["limits.networks"]
result["processes"] = raw["limits.processes"]

// Add the pool-specific disk limits.
for k, v := range raw {
if strings.HasPrefix(k, projectLimitDiskPool) && v.Limit > 0 {
result[fmt.Sprintf("disk.%s", strings.SplitN(k, ".", 4)[3])] = v
}
}

// Get the instance count values.
count, limit, err := getTotalInstanceCountLimit(info)
if err != nil {
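
To make the key flow concrete: a pool-specific limit travels from the project config into the state API under a `disk.<pool>` resource name, which `lxc project info` then prints as `DISK (<POOL>)`. A tiny sketch of the renaming step used in `GetCurrentAllocations` (illustrative only):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// "limits.disk.pool.default" -> ["limits", "disk", "pool", "default"];
	// the fourth field is the pool name, reported as "disk.default".
	k := "limits.disk.pool.default"
	fmt.Printf("disk.%s\n", strings.SplitN(k, ".", 4)[3])
}
```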
4 changes: 2 additions & 2 deletions lxd/storage/backend_lxd.go
@@ -7572,7 +7572,7 @@ func (b *lxdBackend) CreateCustomVolumeFromISO(projectName string, volName strin
}

err := b.state.DB.Cluster.Transaction(b.state.ShutdownCtx, func(ctx context.Context, tx *db.ClusterTx) error {
return limits.AllowVolumeCreation(b.state.GlobalConfig, tx, projectName, req)
return limits.AllowVolumeCreation(b.state.GlobalConfig, tx, projectName, b.name, req)
})
if err != nil {
return fmt.Errorf("Failed checking volume creation allowed: %w", err)
@@ -7677,7 +7677,7 @@ func (b *lxdBackend) CreateCustomVolumeFromBackup(srcBackup backup.Info, srcData
}

err = b.state.DB.Cluster.Transaction(b.state.ShutdownCtx, func(ctx context.Context, tx *db.ClusterTx) error {
return limits.AllowVolumeCreation(b.state.GlobalConfig, tx, srcBackup.Project, req)
return limits.AllowVolumeCreation(b.state.GlobalConfig, tx, srcBackup.Project, b.name, req)
})
if err != nil {
return fmt.Errorf("Failed checking volume creation allowed: %w", err)