diff --git a/doc/.wordlist.txt b/doc/.wordlist.txt index 087487e610a7..681a6a79cbb6 100644 --- a/doc/.wordlist.txt +++ b/doc/.wordlist.txt @@ -26,7 +26,7 @@ BPF Btrfs bugfix bugfixes -Centos +CentOS Ceph CephFS Ceph's @@ -303,6 +303,7 @@ VDPA VFS VFs VirtIO +virtiofs virtualize virtualized VLAN diff --git a/doc/api-extensions.md b/doc/api-extensions.md index a783440831fe..7642f78c9f09 100644 --- a/doc/api-extensions.md +++ b/doc/api-extensions.md @@ -2348,3 +2348,8 @@ This API extension provides the ability to use flags `--device` when importing a ## `instances_uefi_vars` This API extension indicates that the `/1.0/instances/{name}/uefi-vars` endpoint is supported on the server. This endpoint allows to get the full list of UEFI variables (HTTP method GET) or replace the entire set of UEFI variables (HTTP method PUT). + +## `instances_migration_stateful` + +This API extension allows newly created VMs to have their `migration.stateful` configuration key automatically set +through the new server-level configuration key `instances.migration.stateful`. If `migration.stateful` is already set at the profile or instance level then `instances.migration.stateful` is not applied. diff --git a/doc/config_options.txt b/doc/config_options.txt index 364d48111c9c..ff1090c3a991 100644 --- a/doc/config_options.txt +++ b/doc/config_options.txt @@ -150,7 +150,7 @@ Using incremental memory transfer of the instance's memory can reduce downtime. ```{config:option} migration.stateful instance-migration :condition: "virtual machine" -:defaultdesc: "`false`" +:defaultdesc: "`false` or value from profiles or `instances.migration.stateful` (if set)" :liveupdate: "no" :shortdesc: "Whether to allow for stateful stop/start and snapshots" :type: "bool" @@ -1657,6 +1657,13 @@ The events can be any combination of `lifecycle`, `logging`, and `ovn`. Possible values are `bzip2`, `gzip`, `lzma`, `xz`, or `none`. 
``` +```{config:option} instances.migration.stateful server-miscellaneous +:scope: "global" +:shortdesc: "Whether to set `migration.stateful` to `true` for the instances" +:type: "bool" +You can override this setting for relevant instances, either in the instance-specific configuration or through a profile. +``` + ```{config:option} instances.nic.host_name server-miscellaneous :defaultdesc: "`random`" :scope: "global" diff --git a/doc/howto/move_instances.md b/doc/howto/move_instances.md index 6ba502b69e6e..1693ca54f557 100644 --- a/doc/howto/move_instances.md +++ b/doc/howto/move_instances.md @@ -59,6 +59,14 @@ To do so, ensure the following configuration: * Set {config:option}`instance-migration:migration.stateful` to `true` on the instance. * Set [`size.state`](devices-disk) of the virtual machine's root disk device to at least the size of the virtual machine's {config:option}`instance-resource-limits:limits.memory` setting. +```{note} +If you are using a shared storage pool like Ceph RBD to back your instance, you don't need to set [`size.state`](devices-disk) to perform live migration. +``` + +```{note} +When {config:option}`instance-migration:migration.stateful` is enabled in LXD, virtiofs shares are disabled, and files are only shared via the 9P protocol. Consequently, guest OSes lacking 9P support, such as CentOS 8, cannot share files with the host unless stateful migration is disabled. Additionally, the `lxd-agent` will not function for these guests under these conditions. +``` + (live-migration-containers)= ### Live migration for containers diff --git a/lxd/api_project.go b/lxd/api_project.go index 768d1aef4db2..b42234106d71 100644 --- a/lxd/api_project.go +++ b/lxd/api_project.go @@ -703,7 +703,7 @@ func projectChange(s *state.State, project *api.Project, req api.ProjectPut) res // Update the database entry. 
err = s.DB.Cluster.Transaction(context.TODO(), func(ctx context.Context, tx *db.ClusterTx) error { - err := projecthelpers.AllowProjectUpdate(tx, project.Name, req.Config, configChanged) + err := projecthelpers.AllowProjectUpdate(s.GlobalConfig, tx, project.Name, req.Config, configChanged) if err != nil { return err } @@ -972,7 +972,7 @@ func projectStateGet(d *Daemon, r *http.Request) response.Response { // Get current limits and usage. err = s.DB.Cluster.Transaction(context.TODO(), func(ctx context.Context, tx *db.ClusterTx) error { - result, err := projecthelpers.GetCurrentAllocations(ctx, tx, name) + result, err := projecthelpers.GetCurrentAllocations(s.GlobalConfig.Dump(), ctx, tx, name) if err != nil { return err } diff --git a/lxd/cluster/config/config.go b/lxd/cluster/config/config.go index 38471c50e1f5..e1d2f9821ea6 100644 --- a/lxd/cluster/config/config.go +++ b/lxd/cluster/config/config.go @@ -198,6 +198,11 @@ func (c *Config) InstancesPlacementScriptlet() string { return c.m.GetString("instances.placement.scriptlet") } +// InstancesMigrationStateful reports whether to auto enable migration.stateful for all VM instances. +func (c *Config) InstancesMigrationStateful() bool { + return c.m.GetBool("instances.migration.stateful") +} + // LokiServer returns all the Loki settings needed to connect to a server. 
func (c *Config) LokiServer() (apiURL string, authUsername string, authPassword string, apiCACert string, instance string, logLevel string, labels []string, types []string) { if c.m.GetString("loki.types") != "" { @@ -576,6 +581,14 @@ var ConfigSchema = config.Schema{ // shortdesc: Instance placement scriptlet for automatic instance placement "instances.placement.scriptlet": {Validator: validate.Optional(scriptletLoad.InstancePlacementValidate)}, + // lxdmeta:generate(entities=server; group=miscellaneous; key=instances.migration.stateful) + // You can override this setting for relevant instances, either in the instance-specific configuration or through a profile. + // --- + // type: bool + // scope: global + // shortdesc: Whether to set `migration.stateful` to `true` for the instances + "instances.migration.stateful": {Type: config.Bool, Default: "false"}, + // lxdmeta:generate(entities=server; group=loki; key=loki.auth.username) // // --- diff --git a/lxd/db/cluster/instances.go b/lxd/db/cluster/instances.go index 90e35f928d87..3ca0e6709d37 100644 --- a/lxd/db/cluster/instances.go +++ b/lxd/db/cluster/instances.go @@ -77,7 +77,7 @@ type InstanceFilter struct { } // ToAPI converts the database Instance to API type. 
-func (i *Instance) ToAPI(ctx context.Context, tx *sql.Tx) (*api.Instance, error) { +func (i *Instance) ToAPI(ctx context.Context, tx *sql.Tx, globalConfig map[string]any) (*api.Instance, error) { profiles, err := GetInstanceProfiles(ctx, tx, i.ID) if err != nil { return nil, err @@ -108,7 +108,7 @@ func (i *Instance) ToAPI(ctx context.Context, tx *sql.Tx) (*api.Instance, error) return nil, err } - expandedConfig := instancetype.ExpandInstanceConfig(config, apiProfiles) + expandedConfig := instancetype.ExpandInstanceConfig(globalConfig, config, apiProfiles) archName, err := osarch.ArchitectureName(i.Architecture) if err != nil { diff --git a/lxd/db/instances_test.go b/lxd/db/instances_test.go index 197ca645dfb8..461cb9944593 100644 --- a/lxd/db/instances_test.go +++ b/lxd/db/instances_test.go @@ -267,7 +267,7 @@ func TestInstanceList(t *testing.T) { err = c.Transaction(context.TODO(), func(ctx context.Context, tx *db.ClusterTx) error { return tx.InstanceList(ctx, func(dbInst db.InstanceArgs, p api.Project) error { - dbInst.Config = instancetype.ExpandInstanceConfig(dbInst.Config, dbInst.Profiles) + dbInst.Config = instancetype.ExpandInstanceConfig(nil, dbInst.Config, dbInst.Profiles) dbInst.Devices = instancetype.ExpandInstanceDevices(dbInst.Devices, dbInst.Profiles) instances = append(instances, dbInst) diff --git a/lxd/images.go b/lxd/images.go index 58f0c6e0feae..94d947013ca8 100644 --- a/lxd/images.go +++ b/lxd/images.go @@ -1035,7 +1035,7 @@ func imagesPost(d *Daemon, r *http.Request) response.Response { // allowed to use. 
var budget int64 err = s.DB.Cluster.Transaction(context.TODO(), func(ctx context.Context, tx *db.ClusterTx) error { - budget, err = projectutils.GetImageSpaceBudget(tx, projectName) + budget, err = projectutils.GetImageSpaceBudget(s.GlobalConfig, tx, projectName) return err }) if err != nil { diff --git a/lxd/instance/drivers/driver_common.go b/lxd/instance/drivers/driver_common.go index 9c1bb441adaf..9406d169e0d8 100644 --- a/lxd/instance/drivers/driver_common.go +++ b/lxd/instance/drivers/driver_common.go @@ -509,7 +509,12 @@ func (d *common) deviceVolatileSetFunc(devName string) func(save map[string]stri // expandConfig applies the config of each profile in order, followed by the local config. func (d *common) expandConfig() error { - d.expandedConfig = instancetype.ExpandInstanceConfig(d.localConfig, d.profiles) + var globalConfigDump map[string]any + if d.state.GlobalConfig != nil { + globalConfigDump = d.state.GlobalConfig.Dump() + } + + d.expandedConfig = instancetype.ExpandInstanceConfig(globalConfigDump, d.localConfig, d.profiles) d.expandedDevices = instancetype.ExpandInstanceDevices(d.localDevices, d.profiles) return nil diff --git a/lxd/instance/instancetype/instance.go b/lxd/instance/instancetype/instance.go index 255989d39033..1cace42ac7cf 100644 --- a/lxd/instance/instancetype/instance.go +++ b/lxd/instance/instancetype/instance.go @@ -912,7 +912,7 @@ var InstanceConfigKeysVM = map[string]func(value string) error{ // Enabling this option prevents the use of some features that are incompatible with it. 
// --- // type: bool - // defaultdesc: `false` + // defaultdesc: `false` or value from profiles or `instances.migration.stateful` (if set) // liveupdate: no // condition: virtual machine // shortdesc: Whether to allow for stateful stop/start and snapshots diff --git a/lxd/instance/instancetype/instance_utils.go b/lxd/instance/instancetype/instance_utils.go index 93f0ae03ecc1..4df3a518939e 100644 --- a/lxd/instance/instancetype/instance_utils.go +++ b/lxd/instance/instancetype/instance_utils.go @@ -1,14 +1,27 @@ package instancetype import ( + "strconv" + deviceConfig "github.com/canonical/lxd/lxd/device/config" "github.com/canonical/lxd/shared/api" ) // ExpandInstanceConfig expands the given instance config with the config values of the given profiles. -func ExpandInstanceConfig(config map[string]string, profiles []api.Profile) map[string]string { +func ExpandInstanceConfig(globalConfig map[string]any, config map[string]string, profiles []api.Profile) map[string]string { expandedConfig := map[string]string{} + // Apply global config overriding + if globalConfig != nil { + globalInstancesMigrationStatefulStr, ok := globalConfig["instances.migration.stateful"].(string) + if ok { + globalInstancesMigrationStateful, _ := strconv.ParseBool(globalInstancesMigrationStatefulStr) + if globalInstancesMigrationStateful { + expandedConfig["migration.stateful"] = globalInstancesMigrationStatefulStr + } + } + } + // Apply all the profiles. 
profileConfigs := make([]map[string]string, len(profiles)) for i, profile := range profiles { diff --git a/lxd/instance_patch.go b/lxd/instance_patch.go index d865db8bbe1b..9bf1bfbc0ac4 100644 --- a/lxd/instance_patch.go +++ b/lxd/instance_patch.go @@ -200,7 +200,7 @@ func instancePatch(d *Daemon, r *http.Request) response.Response { apiProfiles = append(apiProfiles, *apiProfile) } - return projecthelpers.AllowInstanceUpdate(tx, projectName, name, req, c.LocalConfig()) + return projecthelpers.AllowInstanceUpdate(s.GlobalConfig, tx, projectName, name, req, c.LocalConfig()) }) if err != nil { return response.SmartError(err) diff --git a/lxd/instance_put.go b/lxd/instance_put.go index 066d22421e9c..638af54c5529 100644 --- a/lxd/instance_put.go +++ b/lxd/instance_put.go @@ -148,7 +148,7 @@ func instancePut(d *Daemon, r *http.Request) response.Response { apiProfiles = append(apiProfiles, *apiProfile) } - return projecthelpers.AllowInstanceUpdate(tx, projectName, name, configRaw, inst.LocalConfig()) + return projecthelpers.AllowInstanceUpdate(s.GlobalConfig, tx, projectName, name, configRaw, inst.LocalConfig()) }) if err != nil { return response.SmartError(err) diff --git a/lxd/instances_post.go b/lxd/instances_post.go index 3bff84c37054..fc9480035c92 100644 --- a/lxd/instances_post.go +++ b/lxd/instances_post.go @@ -51,7 +51,7 @@ func ensureDownloadedImageFitWithinBudget(s *state.State, r *http.Request, op *o var budget int64 err = s.DB.Cluster.Transaction(context.TODO(), func(ctx context.Context, tx *db.ClusterTx) error { - budget, err = project.GetImageSpaceBudget(tx, p.Name) + budget, err = project.GetImageSpaceBudget(s.GlobalConfig, tx, p.Name) return err }) if err != nil { @@ -642,7 +642,7 @@ func createFromBackup(s *state.State, r *http.Request, projectName string, data Type: api.InstanceType(bInfo.Config.Container.Type), } - return project.AllowInstanceCreation(tx, projectName, req) + return project.AllowInstanceCreation(s.GlobalConfig, tx, projectName, req) }) 
if err != nil { return response.SmartError(err) @@ -1102,7 +1102,7 @@ func instancesPost(d *Daemon, r *http.Request) response.Response { if !clusterNotification { // Check that the project's limits are not violated. Note this check is performed after // automatically generated config values (such as ones from an InstanceType) have been set. - err = project.AllowInstanceCreation(tx, targetProjectName, req) + err = project.AllowInstanceCreation(s.GlobalConfig, tx, targetProjectName, req) if err != nil { return err } @@ -1134,7 +1134,12 @@ func instancesPost(d *Daemon, r *http.Request) response.Response { Reason: apiScriptlet.InstancePlacementReasonNew, } - reqExpanded.Config = instancetype.ExpandInstanceConfig(reqExpanded.Config, profiles) + var globalConfigDump map[string]any + if s.GlobalConfig != nil { + globalConfigDump = s.GlobalConfig.Dump() + } + + reqExpanded.Config = instancetype.ExpandInstanceConfig(globalConfigDump, reqExpanded.Config, profiles) reqExpanded.Devices = instancetype.ExpandInstanceDevices(deviceConfig.NewDevices(reqExpanded.Devices), profiles).CloneNative() targetMemberInfo, err = scriptlet.InstancePlacementRun(r.Context(), logger.Log, s, &reqExpanded, candidateMembers, leaderAddress) diff --git a/lxd/metadata/configuration.json b/lxd/metadata/configuration.json index c7ba1dae4734..f03624b4acd9 100644 --- a/lxd/metadata/configuration.json +++ b/lxd/metadata/configuration.json @@ -176,7 +176,7 @@ { "migration.stateful": { "condition": "virtual machine", - "defaultdesc": "`false`", + "defaultdesc": "`false` or value from profiles or `instances.migration.stateful` (if set)", "liveupdate": "no", "longdesc": "Enabling this option prevents the use of some features that are incompatible with it.", "shortdesc": "Whether to allow for stateful stop/start and snapshots", @@ -1808,6 +1808,14 @@ "type": "string" } }, + { + "instances.migration.stateful": { + "longdesc": "You can override this setting for relevant instances, either in the instance-specific 
configuration or through a profile.", + "scope": "global", + "shortdesc": "Whether to set `migration.stateful` to `true` for the instances", + "type": "bool" + } + }, { "instances.nic.host_name": { "defaultdesc": "`random`", diff --git a/lxd/network/network_utils_sriov.go b/lxd/network/network_utils_sriov.go index 4072643d79f9..eec8a1ca30f7 100644 --- a/lxd/network/network_utils_sriov.go +++ b/lxd/network/network_utils_sriov.go @@ -62,7 +62,12 @@ func SRIOVGetHostDevicesInUse(s *state.State) (map[string]struct{}, error) { err = s.DB.Cluster.Transaction(context.TODO(), func(ctx context.Context, tx *db.ClusterTx) error { return tx.InstanceList(ctx, func(dbInst db.InstanceArgs, p api.Project) error { // Expand configs so we take into account profile devices. - dbInst.Config = instancetype.ExpandInstanceConfig(dbInst.Config, dbInst.Profiles) + var globalConfigDump map[string]any + if s.GlobalConfig != nil { + globalConfigDump = s.GlobalConfig.Dump() + } + + dbInst.Config = instancetype.ExpandInstanceConfig(globalConfigDump, dbInst.Config, dbInst.Profiles) dbInst.Devices = instancetype.ExpandInstanceDevices(dbInst.Devices, dbInst.Profiles) for name, dev := range dbInst.Devices { diff --git a/lxd/profiles_utils.go b/lxd/profiles_utils.go index 7e89dc0eea0d..89c7f31b81e4 100644 --- a/lxd/profiles_utils.go +++ b/lxd/profiles_utils.go @@ -17,7 +17,7 @@ import ( func doProfileUpdate(s *state.State, p api.Project, profileName string, id int64, profile *api.Profile, req api.ProfilePut) error { // Check project limits. 
err := s.DB.Cluster.Transaction(context.TODO(), func(ctx context.Context, tx *db.ClusterTx) error { - return project.AllowProfileUpdate(tx, p.Name, profileName, req) + return project.AllowProfileUpdate(s.GlobalConfig, tx, p.Name, profileName, req) }) if err != nil { return err diff --git a/lxd/project/permissions.go b/lxd/project/permissions.go index fdf9a1a318f7..856929791c85 100644 --- a/lxd/project/permissions.go +++ b/lxd/project/permissions.go @@ -10,6 +10,7 @@ import ( "strings" "github.com/canonical/lxd/lxd/auth" + clusterConfig "github.com/canonical/lxd/lxd/cluster/config" "github.com/canonical/lxd/lxd/db" "github.com/canonical/lxd/lxd/db/cluster" deviceconfig "github.com/canonical/lxd/lxd/device/config" @@ -25,8 +26,13 @@ import ( // AllowInstanceCreation returns an error if any project-specific limit or // restriction is violated when creating a new instance. -func AllowInstanceCreation(tx *db.ClusterTx, projectName string, req api.InstancesPost) error { - info, err := fetchProject(tx, projectName, true) +func AllowInstanceCreation(globalConfig *clusterConfig.Config, tx *db.ClusterTx, projectName string, req api.InstancesPost) error { + var globalConfigDump map[string]any + if globalConfig != nil { + globalConfigDump = globalConfig.Dump() + } + + info, err := fetchProject(globalConfigDump, tx, projectName, true) if err != nil { return err } @@ -80,7 +86,7 @@ func AllowInstanceCreation(tx *db.ClusterTx, projectName string, req api.Instanc return err } - err = checkRestrictionsAndAggregateLimits(tx, info) + err = checkRestrictionsAndAggregateLimits(globalConfig, tx, info) if err != nil { return fmt.Errorf("Failed checking if instance creation allowed: %w", err) } @@ -227,8 +233,13 @@ func checkRestrictionsOnVolatileConfig(project api.Project, instanceType instanc // AllowVolumeCreation returns an error if any project-specific limit or // restriction is violated when creating a new custom volume in a project. 
-func AllowVolumeCreation(tx *db.ClusterTx, projectName string, req api.StorageVolumesPost) error { - info, err := fetchProject(tx, projectName, true) +func AllowVolumeCreation(globalConfig *clusterConfig.Config, tx *db.ClusterTx, projectName string, req api.StorageVolumesPost) error { + var globalConfigDump map[string]any + if globalConfig != nil { + globalConfigDump = globalConfig.Dump() + } + + info, err := fetchProject(globalConfigDump, tx, projectName, true) if err != nil { return err } @@ -248,7 +259,7 @@ func AllowVolumeCreation(tx *db.ClusterTx, projectName string, req api.StorageVo Config: req.Config, }) - err = checkRestrictionsAndAggregateLimits(tx, info) + err = checkRestrictionsAndAggregateLimits(globalConfig, tx, info) if err != nil { return fmt.Errorf("Failed checking if volume creation allowed: %w", err) } @@ -260,8 +271,13 @@ func AllowVolumeCreation(tx *db.ClusterTx, projectName string, req api.StorageVo // for writing images. // // If no limit is in place, return -1. -func GetImageSpaceBudget(tx *db.ClusterTx, projectName string) (int64, error) { - info, err := fetchProject(tx, projectName, true) +func GetImageSpaceBudget(globalConfig *clusterConfig.Config, tx *db.ClusterTx, projectName string) (int64, error) { + var globalConfigDump map[string]any + if globalConfig != nil { + globalConfigDump = globalConfig.Dump() + } + + info, err := fetchProject(globalConfigDump, tx, projectName, true) if err != nil { return -1, err } @@ -286,7 +302,7 @@ func GetImageSpaceBudget(tx *db.ClusterTx, projectName string) (int64, error) { return -1, err } - instances, err := expandInstancesConfigAndDevices(info.Instances, info.Profiles) + instances, err := expandInstancesConfigAndDevices(globalConfigDump, info.Instances, info.Profiles) if err != nil { return -1, err } @@ -307,7 +323,7 @@ func GetImageSpaceBudget(tx *db.ClusterTx, projectName string) (int64, error) { // Check that we would not violate the project limits or restrictions if we // were to commit the 
given instances and profiles. -func checkRestrictionsAndAggregateLimits(tx *db.ClusterTx, info *projectInfo) error { +func checkRestrictionsAndAggregateLimits(globalConfig *clusterConfig.Config, tx *db.ClusterTx, info *projectInfo) error { // List of config keys for which we need to check aggregate values // across all project instances. aggregateKeys := []string{} @@ -328,7 +344,12 @@ func checkRestrictionsAndAggregateLimits(tx *db.ClusterTx, info *projectInfo) er return nil } - instances, err := expandInstancesConfigAndDevices(info.Instances, info.Profiles) + var globalConfigDump map[string]any + if globalConfig != nil { + globalConfigDump = globalConfig.Dump() + } + + instances, err := expandInstancesConfigAndDevices(globalConfigDump, info.Instances, info.Profiles) if err != nil { return err } @@ -857,9 +878,15 @@ func isVMLowLevelOptionForbidden(key string) bool { // AllowInstanceUpdate returns an error if any project-specific limit or // restriction is violated when updating an existing instance. 
-func AllowInstanceUpdate(tx *db.ClusterTx, projectName, instanceName string, req api.InstancePut, currentConfig map[string]string) error { +func AllowInstanceUpdate(globalConfig *clusterConfig.Config, tx *db.ClusterTx, projectName, instanceName string, req api.InstancePut, currentConfig map[string]string) error { var updatedInstance *api.Instance - info, err := fetchProject(tx, projectName, true) + + var globalConfigDump map[string]any + if globalConfig != nil { + globalConfigDump = globalConfig.Dump() + } + + info, err := fetchProject(globalConfigDump, tx, projectName, true) if err != nil { return err } @@ -893,7 +920,7 @@ func AllowInstanceUpdate(tx *db.ClusterTx, projectName, instanceName string, req return err } - err = checkRestrictionsAndAggregateLimits(tx, info) + err = checkRestrictionsAndAggregateLimits(globalConfig, tx, info) if err != nil { return fmt.Errorf("Failed checking if instance update allowed: %w", err) } @@ -903,8 +930,13 @@ func AllowInstanceUpdate(tx *db.ClusterTx, projectName, instanceName string, req // AllowVolumeUpdate returns an error if any project-specific limit or // restriction is violated when updating an existing custom volume. 
-func AllowVolumeUpdate(tx *db.ClusterTx, projectName, volumeName string, req api.StorageVolumePut, currentConfig map[string]string) error { - info, err := fetchProject(tx, projectName, true) +func AllowVolumeUpdate(globalConfig *clusterConfig.Config, tx *db.ClusterTx, projectName, volumeName string, req api.StorageVolumePut, currentConfig map[string]string) error { + var globalConfigDump map[string]any + if globalConfig != nil { + globalConfigDump = globalConfig.Dump() + } + + info, err := fetchProject(globalConfigDump, tx, projectName, true) if err != nil { return err } @@ -927,7 +959,7 @@ func AllowVolumeUpdate(tx *db.ClusterTx, projectName, volumeName string, req api info.Volumes[i].Config = req.Config } - err = checkRestrictionsAndAggregateLimits(tx, info) + err = checkRestrictionsAndAggregateLimits(globalConfig, tx, info) if err != nil { return fmt.Errorf("Failed checking if volume update allowed: %w", err) } @@ -937,8 +969,13 @@ func AllowVolumeUpdate(tx *db.ClusterTx, projectName, volumeName string, req api // AllowProfileUpdate checks that project limits and restrictions are not // violated when changing a profile. 
-func AllowProfileUpdate(tx *db.ClusterTx, projectName, profileName string, req api.ProfilePut) error { - info, err := fetchProject(tx, projectName, true) +func AllowProfileUpdate(globalConfig *clusterConfig.Config, tx *db.ClusterTx, projectName, profileName string, req api.ProfilePut) error { + var globalConfigDump map[string]any + if globalConfig != nil { + globalConfigDump = globalConfig.Dump() + } + + info, err := fetchProject(globalConfigDump, tx, projectName, true) if err != nil { return err } @@ -957,7 +994,7 @@ func AllowProfileUpdate(tx *db.ClusterTx, projectName, profileName string, req a info.Profiles[i].Devices = req.Devices } - err = checkRestrictionsAndAggregateLimits(tx, info) + err = checkRestrictionsAndAggregateLimits(globalConfig, tx, info) if err != nil { return fmt.Errorf("Failed checking if profile update allowed: %w", err) } @@ -966,13 +1003,18 @@ func AllowProfileUpdate(tx *db.ClusterTx, projectName, profileName string, req a } // AllowProjectUpdate checks the new config to be set on a project is valid. 
-func AllowProjectUpdate(tx *db.ClusterTx, projectName string, config map[string]string, changed []string) error { - info, err := fetchProject(tx, projectName, false) +func AllowProjectUpdate(globalConfig *clusterConfig.Config, tx *db.ClusterTx, projectName string, config map[string]string, changed []string) error { + var globalConfigDump map[string]any + if globalConfig != nil { + globalConfigDump = globalConfig.Dump() + } + + info, err := fetchProject(globalConfigDump, tx, projectName, false) if err != nil { return err } - info.Instances, err = expandInstancesConfigAndDevices(info.Instances, info.Profiles) + info.Instances, err = expandInstancesConfigAndDevices(globalConfigDump, info.Instances, info.Profiles) if err != nil { return err } @@ -1151,7 +1193,7 @@ type projectInfo struct { // If the skipIfNoLimits flag is true, then profiles, instances and volumes // won't be loaded if the profile has no limits set on it, and nil will be // returned. -func fetchProject(tx *db.ClusterTx, projectName string, skipIfNoLimits bool) (*projectInfo, error) { +func fetchProject(globalConfig map[string]any, tx *db.ClusterTx, projectName string, skipIfNoLimits bool) (*projectInfo, error) { ctx := context.Background() dbProject, err := cluster.GetProject(ctx, tx.Tx(), projectName) if err != nil { @@ -1201,7 +1243,7 @@ func fetchProject(tx *db.ClusterTx, projectName string, skipIfNoLimits bool) (*p instances := make([]api.Instance, 0, len(dbInstances)) for _, instance := range dbInstances { - apiInstance, err := instance.ToAPI(ctx, tx.Tx()) + apiInstance, err := instance.ToAPI(ctx, tx.Tx(), globalConfig) if err != nil { return nil, fmt.Errorf("Failed to get API data for instance %q in project %q: %w", instance.Name, instance.Project, err) } @@ -1226,7 +1268,7 @@ func fetchProject(tx *db.ClusterTx, projectName string, skipIfNoLimits bool) (*p // Expand the configuration and devices of the given instances, taking the give // project profiles into account. 
-func expandInstancesConfigAndDevices(instances []api.Instance, profiles []api.Profile) ([]api.Instance, error) { +func expandInstancesConfigAndDevices(globalConfig map[string]any, instances []api.Instance, profiles []api.Profile) ([]api.Instance, error) { expandedInstances := make([]api.Instance, len(instances)) // Index of all profiles by name. @@ -1244,7 +1286,7 @@ func expandInstancesConfigAndDevices(instances []api.Instance, profiles []api.Pr } expandedInstances[i] = instance - expandedInstances[i].Config = instancetype.ExpandInstanceConfig(instance.Config, apiProfiles) + expandedInstances[i].Config = instancetype.ExpandInstanceConfig(globalConfig, instance.Config, apiProfiles) expandedInstances[i].Devices = instancetype.ExpandInstanceDevices(deviceconfig.NewDevices(instance.Devices), apiProfiles).CloneNative() } diff --git a/lxd/project/permissions_test.go b/lxd/project/permissions_test.go index 180e4a109d76..61f08c186a49 100644 --- a/lxd/project/permissions_test.go +++ b/lxd/project/permissions_test.go @@ -30,7 +30,7 @@ func TestAllowInstanceCreation_NotConfigured(t *testing.T) { Type: api.InstanceTypeContainer, } - err := project.AllowInstanceCreation(tx, "default", req) + err := project.AllowInstanceCreation(nil, tx, "default", req) assert.NoError(t, err) } @@ -60,7 +60,7 @@ func TestAllowInstanceCreation_Below(t *testing.T) { Type: api.InstanceTypeContainer, } - err = project.AllowInstanceCreation(tx, "p1", req) + err = project.AllowInstanceCreation(nil, tx, "p1", req) assert.NoError(t, err) } @@ -91,7 +91,7 @@ func TestAllowInstanceCreation_Above(t *testing.T) { Type: api.InstanceTypeContainer, } - err = project.AllowInstanceCreation(tx, "p1", req) + err = project.AllowInstanceCreation(nil, tx, "p1", req) assert.EqualError(t, err, `Reached maximum number of instances of type "container" in project "p1"`) } @@ -122,7 +122,7 @@ func TestAllowInstanceCreation_DifferentType(t *testing.T) { Type: api.InstanceTypeContainer, } - err = 
project.AllowInstanceCreation(tx, "p1", req) + err = project.AllowInstanceCreation(nil, tx, "p1", req) assert.NoError(t, err) } @@ -153,7 +153,7 @@ func TestAllowInstanceCreation_AboveInstances(t *testing.T) { Type: api.InstanceTypeContainer, } - err = project.AllowInstanceCreation(tx, "p1", req) + err = project.AllowInstanceCreation(nil, tx, "p1", req) assert.EqualError(t, err, `Reached maximum number of instances in project "p1"`) } diff --git a/lxd/project/state.go b/lxd/project/state.go index c92a214ad4cf..63ae198a8de3 100644 --- a/lxd/project/state.go +++ b/lxd/project/state.go @@ -11,11 +11,11 @@ import ( ) // GetCurrentAllocations returns the current resource utilization for a given project. -func GetCurrentAllocations(ctx context.Context, tx *db.ClusterTx, projectName string) (map[string]api.ProjectStateResource, error) { +func GetCurrentAllocations(globalConfig map[string]any, ctx context.Context, tx *db.ClusterTx, projectName string) (map[string]api.ProjectStateResource, error) { result := map[string]api.ProjectStateResource{} // Get the project. 
- info, err := fetchProject(tx, projectName, false) + info, err := fetchProject(globalConfig, tx, projectName, false) if err != nil { return nil, err } @@ -24,7 +24,7 @@ func GetCurrentAllocations(ctx context.Context, tx *db.ClusterTx, projectName st return nil, fmt.Errorf("Project %q returned empty info struct", projectName) } - info.Instances, err = expandInstancesConfigAndDevices(info.Instances, info.Profiles) + info.Instances, err = expandInstancesConfigAndDevices(globalConfig, info.Instances, info.Profiles) if err != nil { return nil, err } diff --git a/lxd/storage/backend_lxd.go b/lxd/storage/backend_lxd.go index e889083d85d9..c84fd48e51e3 100644 --- a/lxd/storage/backend_lxd.go +++ b/lxd/storage/backend_lxd.go @@ -7102,7 +7102,7 @@ func (b *lxdBackend) CreateCustomVolumeFromISO(projectName string, volName strin } err := b.state.DB.Cluster.Transaction(b.state.ShutdownCtx, func(ctx context.Context, tx *db.ClusterTx) error { - return project.AllowVolumeCreation(tx, projectName, req) + return project.AllowVolumeCreation(b.state.GlobalConfig, tx, projectName, req) }) if err != nil { return fmt.Errorf("Failed checking volume creation allowed: %w", err) @@ -7187,7 +7187,7 @@ func (b *lxdBackend) CreateCustomVolumeFromBackup(srcBackup backup.Info, srcData } err := b.state.DB.Cluster.Transaction(b.state.ShutdownCtx, func(ctx context.Context, tx *db.ClusterTx) error { - return project.AllowVolumeCreation(tx, srcBackup.Project, req) + return project.AllowVolumeCreation(b.state.GlobalConfig, tx, srcBackup.Project, req) }) if err != nil { return fmt.Errorf("Failed checking volume creation allowed: %w", err) diff --git a/lxd/storage_volumes.go b/lxd/storage_volumes.go index b88b99821ee3..81e727728aca 100644 --- a/lxd/storage_volumes.go +++ b/lxd/storage_volumes.go @@ -704,7 +704,7 @@ func storagePoolVolumesPost(d *Daemon, r *http.Request) response.Response { return err } - err = project.AllowVolumeCreation(tx, projectName, req) + err = 
project.AllowVolumeCreation(s.GlobalConfig, tx, projectName, req) if err != nil { return err } @@ -1883,7 +1883,7 @@ func storagePoolVolumePut(d *Daemon, r *http.Request) response.Response { if req.Config != nil || req.Restore == "" { // Possibly check if project limits are honored. err = s.DB.Cluster.Transaction(r.Context(), func(ctx context.Context, tx *db.ClusterTx) error { - return project.AllowVolumeUpdate(tx, projectName, volumeName, req, dbVolume.Config) + return project.AllowVolumeUpdate(s.GlobalConfig, tx, projectName, volumeName, req, dbVolume.Config) }) if err != nil { return response.SmartError(err) diff --git a/shared/version/api.go b/shared/version/api.go index 230763c3af23..2482febb63e6 100644 --- a/shared/version/api.go +++ b/shared/version/api.go @@ -397,6 +397,7 @@ var APIExtensions = []string{ "storage_volatile_uuid", "import_instance_devices", "instances_uefi_vars", + "instances_migration_stateful", } // APIExtensionsCount returns the number of available API extensions.